diffstat:

 include/linux/sched/sysctl.h |  5 ++-
 kernel/sched/core.c          | 81 ++++++++++++++++++++++++++---------------
 kernel/sysctl.c              | 13 +++++--
 3 files changed, 70 insertions(+), 29 deletions(-)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 0a6047acb5d2..eaf153b35cfa 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -47,7 +47,10 @@ extern unsigned int sysctl_sched_window_stats_policy;
 extern unsigned int sysctl_sched_init_task_load_pct;
 #endif
 
-extern unsigned int sysctl_sched_task_migrate_notify_pct;
+#ifdef CONFIG_SCHED_FREQ_INPUT
+extern int sysctl_sched_freq_inc_notify_slack_pct;
+extern int sysctl_sched_freq_dec_notify_slack_pct;
+#endif
 
 #ifdef CONFIG_SCHED_HMP
 extern unsigned int sysctl_sched_enable_hmp_task_placement;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1352245a0fc4..3a869c46dcbf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1096,27 +1096,29 @@ unsigned int __read_mostly sched_use_pelt;
 unsigned int max_possible_efficiency = 1024;
 unsigned int min_possible_efficiency = 1024;
 
-__read_mostly unsigned int sysctl_sched_task_migrate_notify_pct = 25;
-unsigned int sched_task_migrate_notify;
+__read_mostly int sysctl_sched_freq_inc_notify_slack_pct;
+__read_mostly int sysctl_sched_freq_dec_notify_slack_pct = 25;
 
-int sched_migrate_notify_proc_handler(struct ctl_table *table, int write,
-			void __user *buffer, size_t *lenp,
-			loff_t *ppos)
+/* Returns how undercommitted a CPU is given its current frequency and
+ * task load (as measured in the previous window). Returns this value
+ * as a percentage of the CPU's maximum frequency. A negative value
+ * means the CPU is overcommitted at its current frequency.
+ */
+int rq_freq_margin(struct rq *rq)
 {
-	int ret;
-	unsigned int *data = (unsigned int *)table->data;
-
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret || !write)
-		return ret;
-
-	if (*data > 100)
-		return -EINVAL;
+	unsigned int freq_required;
+	int margin;
 
-	sched_task_migrate_notify = div64_u64((u64)*data *
-			(u64)max_task_load(), 100);
+	freq_required = scale_task_load(rq->prev_runnable_sum, rq->cpu);
+	freq_required *= 128;
+	freq_required /= max_task_load();
+	freq_required *= rq->max_possible_freq;
+	freq_required /= 128;
 
-	return 0;
+	margin = rq->cur_freq - freq_required;
+	margin *= 100;
+	margin /= (int)rq->max_possible_freq;
+
+	return margin;
 }
 
 /*
@@ -1437,6 +1439,11 @@ void sched_set_window(u64 window_start, unsigned int window_size)
 
 #else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
 
+static inline int rq_freq_margin(struct rq *rq)
+{
+	return INT_MAX;
+}
+
 static inline void init_cpu_efficiency(void) {}
 
 static inline void mark_task_starting(struct task_struct *p) {}
@@ -1739,23 +1746,32 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (p->state == TASK_WAKING)
 			double_rq_unlock(src_rq, dest_rq);
 
-		/* Is p->ravg.prev_window significant? Trigger a load
-		   alert notifier if so. */
-		if (p->ravg.prev_window > sched_task_migrate_notify &&
-		    !cpumask_test_cpu(new_cpu,
-				&src_rq->freq_domain_cpumask)) {
+		if (cpumask_test_cpu(new_cpu,
+				&src_rq->freq_domain_cpumask))
+			goto done;
+
+		/* Evaluate possible frequency notifications for
+		 * source and destination CPUs in different frequency
+		 * domains.
+		 */
+		if (rq_freq_margin(dest_rq) <
+				sysctl_sched_freq_inc_notify_slack_pct)
			atomic_notifier_call_chain(
				&load_alert_notifier_head, 0,
-				(void *)(long)task_cpu(p));
+				(void *)(long)new_cpu);
+
+		if (rq_freq_margin(src_rq) >
+				sysctl_sched_freq_dec_notify_slack_pct)
			atomic_notifier_call_chain(
				&load_alert_notifier_head, 0,
-				(void *)(long)new_cpu);
-		}
 	}
 #endif
 	}
 
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
+done:
+#endif
 	__set_task_cpu(p, new_cpu);
 }
@@ -2461,6 +2477,14 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	if (src_cpu != cpu) {
 		wake_flags |= WF_MIGRATED;
 		set_task_cpu(p, cpu);
+	} else {
+#ifdef CONFIG_SCHED_FREQ_INPUT
+		if (rq_freq_margin(cpu_rq(cpu)) <
+				sysctl_sched_freq_inc_notify_slack_pct)
+			atomic_notifier_call_chain(
+				&load_alert_notifier_head, 0,
+				(void *)(long)cpu);
+#endif
 	}
 
 #endif /* CONFIG_SMP */
@@ -2884,6 +2908,13 @@ void wake_up_new_task(struct task_struct *p)
 	rq = __task_rq_lock(p);
 	mark_task_starting(p);
+#ifdef CONFIG_SCHED_FREQ_INPUT
+	if (rq_freq_margin(task_rq(p)) <
+			sysctl_sched_freq_inc_notify_slack_pct)
+		atomic_notifier_call_chain(
+			&load_alert_notifier_head, 0,
+			(void *)(long)task_cpu(p));
+#endif
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 66fc408900c2..cdc63fb78433 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -294,11 +294,18 @@ static struct ctl_table kern_table[] = {
 	},
 #ifdef CONFIG_SCHED_FREQ_INPUT
 	{
-		.procname	= "sched_task_migrate_notify",
-		.data		= &sysctl_sched_task_migrate_notify_pct,
+		.procname	= "sched_freq_inc_notify_slack_pct",
+		.data		= &sysctl_sched_freq_inc_notify_slack_pct,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= sched_migrate_notify_proc_handler,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "sched_freq_dec_notify_slack_pct",
+		.data		= &sysctl_sched_freq_dec_notify_slack_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
 	},
 #endif
 #if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)