-rw-r--r--  include/linux/sched/sysctl.h    1
-rw-r--r--  include/trace/events/sched.h   16
-rw-r--r--  kernel/sched/core.c           120
-rw-r--r--  kernel/sched/sched.h            3
-rw-r--r--  kernel/sysctl.c                 7
5 files changed, 34 insertions, 113 deletions
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 04e153610c9f..8cadba92aee0 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -44,7 +44,6 @@ extern unsigned int sysctl_sched_wakeup_load_threshold;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_account_wait_time;
extern unsigned int sysctl_sched_ravg_hist_size;
-extern unsigned int sysctl_sched_gov_response_time;
extern unsigned int sysctl_sched_freq_account_wait_time;
extern unsigned int sysctl_sched_migration_fixup;
extern unsigned int sysctl_sched_heavy_task_pct;
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8829075d0746..2d76ec1aa1cd 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -408,24 +408,24 @@ TRACE_EVENT(sched_get_busy,
TRACE_EVENT(sched_freq_alert,
- TP_PROTO(int cpu, unsigned int cur_freq, unsigned int freq_required),
+ TP_PROTO(int cpu, u64 old_load, u64 new_load),
- TP_ARGS(cpu, cur_freq, freq_required),
+ TP_ARGS(cpu, old_load, new_load),
TP_STRUCT__entry(
__field( int, cpu )
- __field(unsigned int, cur_freq )
- __field(unsigned int, freq_required )
+ __field( u64, old_load )
+ __field( u64, new_load )
),
TP_fast_assign(
__entry->cpu = cpu;
- __entry->cur_freq = cur_freq;
- __entry->freq_required = freq_required;
+ __entry->old_load = old_load;
+ __entry->new_load = new_load;
),
- TP_printk("cpu %d cur_freq=%u freq_required=%u",
- __entry->cpu, __entry->cur_freq, __entry->freq_required)
+ TP_printk("cpu %d old_load=%llu new_load=%llu",
+ __entry->cpu, __entry->old_load, __entry->new_load)
);
#endif /* CONFIG_SCHED_FREQ_INPUT */
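For reference, with the reworked TP_printk() format the event line in the trace buffer would look like this (load values illustrative; both are window-based busy times in nanoseconds):

	sched_freq_alert: cpu 0 old_load=4000000 new_load=9500000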
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2ecc87e12491..733ce45430b9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1191,12 +1191,6 @@ static __read_mostly unsigned int sched_freq_account_wait_time;
__read_mostly unsigned int sysctl_sched_freq_account_wait_time;
/*
- * Force-issue notification to governor if we waited long enough since sending
- * last notification and did not see any freq change.
- */
-__read_mostly unsigned int sysctl_sched_gov_response_time = 10000000;
-
-/*
* For increase, send notification if
* freq_required - cur_freq > sysctl_sched_freq_inc_notify
*/
@@ -1328,96 +1322,51 @@ nearly_same_freq(unsigned int cur_freq, unsigned int freq_required)
return delta < sysctl_sched_freq_dec_notify;
}
-/* Is governor late in responding? */
-static inline int freq_request_timeout(struct rq *rq)
+/* Convert busy time to frequency equivalent */
+static inline unsigned int load_to_freq(struct rq *rq, u64 load)
{
- u64 now = sched_clock();
+ unsigned int freq;
+
+ load = scale_load_to_cpu(load, cpu_of(rq));
+ load *= 128;
+ load = div64_u64(load, max_task_load());
+
+ freq = load * rq->max_possible_freq;
+ freq /= 128;
- return ((now - rq->freq_requested_ts) > sysctl_sched_gov_response_time);
+ return freq;
}
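To make the fixed-point arithmetic above concrete, here is a standalone userspace sketch of the same conversion. The window and frequency values are assumed for illustration, the scale_load_to_cpu() step (which normalizes load across CPUs of differing capacity) is omitted, and div64_u64() becomes plain 64-bit division outside the kernel:

	#include <stdio.h>
	#include <stdint.h>

	/* Assumed values, for illustration only */
	#define MAX_TASK_LOAD		10000000ULL	/* ns: busy for a full 10 ms window */
	#define MAX_POSSIBLE_FREQ	2400000U	/* kHz: top freq of the fastest CPU */

	/* Mirrors load_to_freq(): busy time -> 0..128 scale -> frequency */
	static unsigned int load_to_freq(uint64_t load)
	{
		unsigned int freq;

		load *= 128;
		load /= MAX_TASK_LOAD;		/* now in 0..128 */

		freq = load * MAX_POSSIBLE_FREQ;
		freq /= 128;

		return freq;
	}

	int main(void)
	{
		/* 5 ms busy in a 10 ms window -> half the max frequency */
		printf("%u kHz\n", load_to_freq(5000000ULL));	/* prints 1200000 kHz */
		return 0;
	}

The *128 / /128 pair keeps the intermediate math in integer space: the scaled load is bounded to 0..128, so freq fits comfortably in an unsigned int.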
/* Should scheduler alert governor for changing frequency? */
-static int send_notification(struct rq *rq, unsigned int freq_required)
+static int send_notification(struct rq *rq)
{
- int cpu, rc = 0;
- unsigned int freq_requested = rq->freq_requested;
- struct rq *domain_rq;
- unsigned long flags;
-
- if (freq_required > rq->max_freq)
- freq_required = rq->max_freq;
- else if (freq_required < rq->min_freq)
- freq_required = rq->min_freq;
+ unsigned int cur_freq, freq_required;
- if (nearly_same_freq(rq->cur_freq, freq_required))
+ if (!sched_enable_hmp)
return 0;
- if (freq_requested && nearly_same_freq(freq_requested, freq_required) &&
- !freq_request_timeout(rq))
- return 0;
+ cur_freq = load_to_freq(rq, rq->old_busy_time);
+ freq_required = load_to_freq(rq, rq->prev_runnable_sum);
- cpu = cpumask_first(&rq->freq_domain_cpumask);
- if (cpu >= nr_cpu_ids)
+ if (nearly_same_freq(cur_freq, freq_required))
return 0;
- domain_rq = cpu_rq(cpu);
- raw_spin_lock_irqsave(&domain_rq->lock, flags);
- freq_requested = domain_rq->freq_requested;
- if (!freq_requested ||
- !nearly_same_freq(freq_requested, freq_required) ||
- freq_request_timeout(domain_rq)) {
-
- u64 now = sched_clock();
-
- /*
- * Cache the new frequency requested in rq of all cpus that are
- * in same freq domain. This saves frequent grabbing of
- * domain_rq->lock
- */
- for_each_cpu(cpu, &rq->freq_domain_cpumask) {
- cpu_rq(cpu)->freq_requested = freq_required;
- cpu_rq(cpu)->freq_requested_ts = now;
- }
- rc = 1;
- }
- raw_spin_unlock_irqrestore(&domain_rq->lock, flags);
-
- return rc;
+ return 1;
}
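Plugging the sketch's numbers in: if the governor last acted on an old_busy_time worth 1200000 kHz and prev_runnable_sum now converts to 1900000 kHz, nearly_same_freq() compares the 700000 kHz delta against sysctl_sched_freq_inc_notify (or sysctl_sched_freq_dec_notify for a drop), and send_notification() returns 1 only when the delta exceeds the tunable threshold.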
/* Alert governor if there is a need to change frequency */
void check_for_freq_change(struct rq *rq)
{
- unsigned int freq_required;
- int i, max_demand_cpu = 0;
- u64 max_demand = 0;
-
- if (!sched_enable_hmp)
- return;
-
- /* Find out max demand across cpus in same frequency domain */
- for_each_cpu(i, &rq->freq_domain_cpumask) {
- if (cpu_rq(i)->prev_runnable_sum > max_demand) {
- max_demand = cpu_rq(i)->prev_runnable_sum;
- max_demand_cpu = i;
- }
- }
-
- max_demand = scale_load_to_cpu(max_demand, rq->cpu);
- max_demand *= 128;
- max_demand = div64_u64(max_demand, max_task_load());
-
- freq_required = max_demand * rq->max_possible_freq;
- freq_required /= 128;
+ int cpu = cpu_of(rq);
- if (!send_notification(rq, freq_required))
+ if (!send_notification(rq))
return;
- trace_sched_freq_alert(max_demand_cpu, rq->cur_freq, freq_required);
+ trace_sched_freq_alert(cpu, rq->old_busy_time, rq->prev_runnable_sum);
atomic_notifier_call_chain(
&load_alert_notifier_head, 0,
- (void *)(long)max_demand_cpu);
+ (void *)(long)cpu);
}
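On the receiving end, a governor subscribes to load_alert_notifier_head. A minimal sketch of such a consumer follows; the callback name and its body are hypothetical, only the notifier head and the cpu-number-in-data convention come from check_for_freq_change() above:

	#include <linux/kernel.h>
	#include <linux/notifier.h>

	extern struct atomic_notifier_head load_alert_notifier_head;

	/* Hypothetical governor-side callback: 'data' carries the cpu number
	 * that check_for_freq_change() passes into the notifier chain. */
	static int load_alert_cb(struct notifier_block *nb, unsigned long action,
				 void *data)
	{
		int cpu = (long)data;

		pr_debug("load alert on cpu%d\n", cpu);
		/* re-sample the load (e.g. via sched_get_busy()) and
		 * re-evaluate the frequency target for this cpu */
		return NOTIFY_OK;
	}

	static struct notifier_block load_alert_nb = {
		.notifier_call = load_alert_cb,
	};

	/* in governor init:
	 *	atomic_notifier_chain_register(&load_alert_notifier_head,
	 *				       &load_alert_nb);
	 */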
static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
@@ -2185,6 +2134,7 @@ unsigned long sched_get_busy(int cpu)
*/
raw_spin_lock_irqsave(&rq->lock, flags);
update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
+ load = rq->old_busy_time = rq->prev_runnable_sum;
raw_spin_unlock_irqrestore(&rq->lock, flags);
/*
@@ -2193,7 +2143,7 @@ unsigned long sched_get_busy(int cpu)
* Note that scale_load_to_cpu() scales load in reference to
* rq->max_freq
*/
- load = scale_load_to_cpu(rq->prev_runnable_sum, cpu);
+ load = scale_load_to_cpu(load, cpu);
load = div64_u64(load * (u64)rq->max_freq, (u64)rq->max_possible_freq);
load = div64_u64(load, NSEC_PER_USEC);
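A worked example with assumed values, taking scale_load_to_cpu() as identity for simplicity: with prev_runnable_sum = 5000000 ns, rq->max_freq = 1800000 kHz and rq->max_possible_freq = 2400000 kHz, old_busy_time caches the raw 5000000 and the function returns 5000000 * 1800000 / 2400000 / 1000 = 3750 us of busy time expressed at this CPU's own maximum frequency. Caching the unscaled value is what lets send_notification() later run old_busy_time and prev_runnable_sum through the same load_to_freq() conversion on equal footing.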
@@ -2511,25 +2461,6 @@ static int cpufreq_notifier_trans(struct notifier_block *nb,
cpu_rq(cpu)->cur_freq = new_freq;
raw_spin_unlock_irqrestore(&rq->lock, flags);
-#ifdef CONFIG_SCHED_FREQ_INPUT
- /* clear freq request for CPUs in the same freq domain */
- if (!rq->freq_requested)
- return 0;
-
- /* The first CPU (and its rq lock) in a freq domain is used to
- * serialize all freq change tests and notifications for CPUs
- * in that domain. */
- cpu = cpumask_first(&rq->freq_domain_cpumask);
- if (cpu >= nr_cpu_ids)
- return 0;
-
- rq = cpu_rq(cpu);
- raw_spin_lock_irqsave(&rq->lock, flags);
- for_each_cpu(cpu, &rq->freq_domain_cpumask)
- cpu_rq(cpu)->freq_requested = 0;
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-#endif
-
return 0;
}
@@ -9194,9 +9125,8 @@ void __init sched_init(void)
rq->nr_small_tasks = rq->nr_big_tasks = 0;
rq->hmp_flags = 0;
#ifdef CONFIG_SCHED_FREQ_INPUT
- rq->freq_requested = 0;
- rq->freq_requested_ts = 0;
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+ rq->old_busy_time = 0;
#endif
#endif
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1fc108e5cdda..a4c7dde3c892 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -660,8 +660,7 @@ struct rq {
u64 window_start;
#ifdef CONFIG_SCHED_FREQ_INPUT
- unsigned int freq_requested;
- u64 freq_requested_ts;
+ unsigned int old_busy_time;
#endif
#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2d5ac1775889..e7b03e816bd7 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -335,13 +335,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
},
- {
- .procname = "sched_gov_response_time",
- .data = &sysctl_sched_gov_response_time,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
#endif
#ifdef CONFIG_SCHED_HMP
{