summaryrefslogtreecommitdiff
path: root/kernel/sched
diff options
context:
space:
mode:
authorJoonwoo Park <joonwoop@codeaurora.org>2016-03-28 14:22:52 -0700
committerKyle Yan <kyan@codeaurora.org>2016-04-27 19:13:06 -0700
commit55b8e041e6b3c2f7f093d275d6fdf00e7c673b0c (patch)
tree09ec119fd879c6b2df48a5ed4165c13eca6ab6a3 /kernel/sched
parent35f1d99e0a3ad7f1b15ca2085ca92fd545dd01de (diff)
sched: take into account of limited CPU min and max frequencies
A CPU's actual min and max frequencies can be limited by hardware components without the governor being aware of it. Provide an API through which such components can notify the scheduler, so it can observe the accurate, currently operating frequency boundaries and make better task placement decisions. CRs-fixed: 1006303 Change-Id: I608f5fa8b0baff8d9e998731dcddec59c9073d20 Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c80
-rw-r--r--kernel/sched/sched.h18
2 files changed, 72 insertions, 26 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3afa3c5d5ebd..ac5f67da19b9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1312,7 +1312,7 @@ capacity_scale_cpu_efficiency(struct sched_cluster *cluster)
*/
static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
{
- return (1024 * cluster->max_freq) / min_max_freq;
+ return (1024 * cluster_max_freq(cluster)) / min_max_freq;
}
/*
@@ -1333,7 +1333,8 @@ load_scale_cpu_efficiency(struct sched_cluster *cluster)
*/
static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
{
- return DIV_ROUND_UP(1024 * max_possible_freq, cluster->max_freq);
+ return DIV_ROUND_UP(1024 * max_possible_freq,
+ cluster_max_freq(cluster));
}
static int compute_capacity(struct sched_cluster *cluster)
@@ -1399,6 +1400,7 @@ static struct sched_cluster init_cluster = {
.load_scale_factor = 1024,
.cur_freq = 1,
.max_freq = 1,
+ .max_mitigated_freq = UINT_MAX,
.min_freq = 1,
.max_possible_freq = 1,
.cpu_cycle_max_scale_factor = 1,
@@ -1548,6 +1550,7 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
cluster->load_scale_factor = 1024;
cluster->cur_freq = 1;
cluster->max_freq = 1;
+ cluster->max_mitigated_freq = UINT_MAX;
cluster->min_freq = 1;
cluster->max_possible_freq = 1;
cluster->cpu_cycle_max_scale_factor = 1;
@@ -1992,7 +1995,7 @@ static int send_notification(struct rq *rq, int check_pred)
u64 prev = rq->old_busy_time;
u64 predicted = rq->hmp_stats.pred_demands_sum;
- if (rq->cluster->cur_freq == rq->cluster->max_freq)
+ if (rq->cluster->cur_freq == cpu_max_freq(cpu_of(rq)))
return 0;
prev = max(prev, rq->old_estimated_time);
@@ -3591,6 +3594,53 @@ unsigned int sched_get_group_id(struct task_struct *p)
return group_id;
}
+static void update_cpu_cluster_capacity(const cpumask_t *cpus)
+{
+ int i;
+ struct sched_cluster *cluster;
+ struct cpumask cpumask;
+
+ cpumask_copy(&cpumask, cpus);
+ pre_big_task_count_change(cpu_possible_mask);
+
+ for_each_cpu(i, &cpumask) {
+ cluster = cpu_rq(i)->cluster;
+ cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+ cluster->capacity = compute_capacity(cluster);
+ cluster->load_scale_factor = compute_load_scale_factor(cluster);
+
+		/* 'cpus' can contain CPUs from more than one cluster */
+ check_for_up_down_migrate_update(&cluster->cpus);
+ }
+
+ __update_min_max_capacity();
+
+ post_big_task_count_change(cpu_possible_mask);
+}
+
+void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
+{
+ struct cpumask cpumask;
+ struct sched_cluster *cluster;
+ unsigned int orig_max_freq;
+ int i, update_capacity = 0;
+
+ cpumask_copy(&cpumask, cpus);
+ for_each_cpu(i, &cpumask) {
+ cluster = cpu_rq(i)->cluster;
+ cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+ orig_max_freq = cpu_max_freq(i);
+ cluster->max_mitigated_freq = fmax;
+
+ update_capacity += (orig_max_freq != cpu_max_freq(i));
+ }
+
+ if (update_capacity)
+ update_cpu_cluster_capacity(cpus);
+}
+
static int cpufreq_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
@@ -3621,7 +3671,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
cpumask_andnot(&policy_cluster, &policy_cluster,
&cluster->cpus);
- orig_max_freq = cluster->max_freq;
+ orig_max_freq = cpu_max_freq(i);
cluster->min_freq = policy->min;
cluster->max_freq = policy->max;
cluster->cur_freq = policy->cur;
@@ -3643,27 +3693,11 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
continue;
}
- update_capacity += (orig_max_freq != policy->max);
+ update_capacity += (orig_max_freq != cpu_max_freq(i));
}
- if (!update_capacity)
- return 0;
-
- policy_cluster = *policy->related_cpus;
- pre_big_task_count_change(cpu_possible_mask);
-
- for_each_cpu(i, &policy_cluster) {
- cluster = cpu_rq(i)->cluster;
- cpumask_andnot(&policy_cluster, &policy_cluster,
- &cluster->cpus);
- cluster->capacity = compute_capacity(cluster);
- cluster->load_scale_factor = compute_load_scale_factor(cluster);
- }
-
- __update_min_max_capacity();
-
- check_for_up_down_migrate_update(policy->related_cpus);
- post_big_task_count_change(cpu_possible_mask);
+ if (update_capacity)
+ update_cpu_cluster_capacity(policy->related_cpus);
return 0;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 19033bfc3f8e..96f5c187dfcd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -383,10 +383,12 @@ struct sched_cluster {
int efficiency; /* Differentiate cpus with different IPC capability */
int load_scale_factor;
/*
- * max_freq = user or thermal defined maximum
+ * max_freq = user maximum
+ * max_mitigated_freq = thermal defined maximum
* max_possible_freq = maximum supported by hardware
*/
- unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
+ unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
+ unsigned int max_possible_freq;
/*
* cpu_cycle_max_scale_factor represents number of cycles per NSEC at
* CPU's fmax.
@@ -1105,9 +1107,19 @@ static inline unsigned int cpu_min_freq(int cpu)
return cpu_rq(cpu)->cluster->min_freq;
}
+static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
+{
+	/*
+	 * The governor and the thermal driver are unaware of each other's
+	 * mitigation vote, so struct sched_cluster stores both and the
+	 * current cluster fmax is the min() of the two.
+	 */
+ return min(cluster->max_mitigated_freq, cluster->max_freq);
+}
+
static inline unsigned int cpu_max_freq(int cpu)
{
- return cpu_rq(cpu)->cluster->max_freq;
+ return cluster_max_freq(cpu_rq(cpu)->cluster);
}
static inline unsigned int cpu_max_possible_freq(int cpu)