author    Olav Haugan <ohaugan@codeaurora.org>    2015-08-10 16:41:44 -0700
committer David Keitel <dkeitel@codeaurora.org>   2016-03-23 20:02:27 -0700
commit    03a683a55c450e26f1ebde7400f64a4b7ecd68c2 (patch)
tree      89d641bf72662652aff71607161baac88b731110
parent    4996dafe6875f2804d525e6ac74162e3efc4cda3 (diff)
sched: Add tunables for static cpu and cluster cost
Add a per-cpu tunable to set the extra cost of using a CPU that is idle.
Add the same for a cluster.

Change-Id: I4aa53f3c42c963df7abc7480980f747f0413d389
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
[joonwoop@codeaurora.org: omitted changes for qhmp*.[c,h]; stripped out
 CONFIG_SCHED_QHMP in drivers/base/cpu.c and include/linux/sched.h]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
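The new attributes are added to the common and hotpluggable CPU attribute groups in drivers/base/cpu.c, so they should appear in each CPU's sysfs directory. A minimal userspace sketch of driving them, assuming the nodes show up as /sys/devices/system/cpu/cpu<N>/sched_static_cpu_pwr_cost and sched_static_cluster_pwr_cost (the names come from the DEVICE_ATTR definitions below; the directory layout is the usual one for cpu devices):

/*
 * Sketch only: writes platform-specific example values to the tunables.
 * Assumes the attributes are exposed under /sys/devices/system/cpu/cpu<N>/.
 */
#include <stdio.h>
#include <stdlib.h>

static int write_cpu_tunable(int cpu, const char *name, unsigned int cost)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/%s", cpu, name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%u\n", cost);
	return fclose(f);
}

int main(void)
{
	/* Example values only; real costs are platform specific. */
	if (write_cpu_tunable(0, "sched_static_cpu_pwr_cost", 100) ||
	    write_cpu_tunable(0, "sched_static_cluster_pwr_cost", 300)) {
		perror("write tunable");
		return EXIT_FAILURE;
	}
	return 0;
}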
-rw-r--r--  drivers/base/cpu.c            | 99
-rw-r--r--  include/linux/sched.h         |  4
-rw-r--r--  include/trace/events/sched.h  |  7
-rw-r--r--  kernel/sched/core.c           | 29
-rw-r--r--  kernel/sched/debug.c          |  2
-rw-r--r--  kernel/sched/fair.c           | 12
-rw-r--r--  kernel/sched/sched.h          |  2
7 files changed, 149 insertions(+), 6 deletions(-)
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 91bbb1959d8d..dee022638fe6 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -180,10 +180,106 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif
+#ifdef CONFIG_SCHED_HMP
+
+static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpuid = cpu->dev.id;
+ unsigned int pwr_cost;
+
+ pwr_cost = sched_get_static_cpu_pwr_cost(cpuid);
+
+ rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
+
+ return rc;
+}
+
+static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int err;
+ int cpuid = cpu->dev.id;
+ unsigned int pwr_cost;
+
+ err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
+ if (err)
+ return err;
+
+ err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost);
+
+ if (err >= 0)
+ err = count;
+
+ return err;
+}
+
+static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpuid = cpu->dev.id;
+ unsigned int pwr_cost;
+
+ pwr_cost = sched_get_static_cluster_pwr_cost(cpuid);
+
+ rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
+
+ return rc;
+}
+
+static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int err;
+ int cpuid = cpu->dev.id;
+ unsigned int pwr_cost;
+
+ err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
+ if (err)
+ return err;
+
+ err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost);
+
+ if (err >= 0)
+ err = count;
+
+ return err;
+}
+
+static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644,
+ show_sched_static_cpu_pwr_cost,
+ store_sched_static_cpu_pwr_cost);
+static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644,
+ show_sched_static_cluster_pwr_cost,
+ store_sched_static_cluster_pwr_cost);
+
+static struct attribute *hmp_sched_cpu_attrs[] = {
+ &dev_attr_sched_static_cpu_pwr_cost.attr,
+ &dev_attr_sched_static_cluster_pwr_cost.attr,
+ NULL
+};
+
+static struct attribute_group sched_hmp_cpu_attr_group = {
+ .attrs = hmp_sched_cpu_attrs,
+};
+
+#endif /* CONFIG_SCHED_HMP */
+
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
+#ifdef CONFIG_SCHED_HMP
+ &sched_hmp_cpu_attr_group,
+#endif
NULL
};
@@ -191,6 +287,9 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
+#ifdef CONFIG_SCHED_HMP
+ &sched_hmp_cpu_attr_group,
+#endif
NULL
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9e4171146f39..77d3b4c106cd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2352,6 +2352,10 @@ extern u32 sched_get_wake_up_idle(struct task_struct *p);
extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
+extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
+extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
#else
static inline int sched_set_boost(int enable)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 3b10396d61b6..69aede209948 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -171,6 +171,7 @@ TRACE_EVENT(sched_cpu_load,
__field(unsigned int, max_freq )
__field(unsigned int, power_cost )
__field( int, cstate )
+ __field( int, dstate )
__field( int, temp )
),
@@ -187,14 +188,16 @@ TRACE_EVENT(sched_cpu_load,
__entry->max_freq = rq->max_freq;
__entry->power_cost = power_cost;
__entry->cstate = rq->cstate;
+ __entry->dstate = rq->dstate;
__entry->temp = temp;
),
- TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d temp %d",
+ TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d dstate %d temp %d",
__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
__entry->load_scale_factor, __entry->capacity,
__entry->cumulative_runnable_avg, __entry->irqload, __entry->cur_freq,
- __entry->max_freq, __entry->power_cost, __entry->cstate, __entry->temp)
+ __entry->max_freq, __entry->power_cost, __entry->cstate,
+ __entry->dstate, __entry->temp)
);
TRACE_EVENT(sched_set_boost,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1150d8665ac8..7b874fb5ebcd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1168,6 +1168,32 @@ static inline void clear_hmp_request(int cpu)
}
}
+int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ rq->static_cpu_pwr_cost = cost;
+ return 0;
+}
+
+unsigned int sched_get_static_cpu_pwr_cost(int cpu)
+{
+ return cpu_rq(cpu)->static_cpu_pwr_cost;
+}
+
+int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ rq->static_cluster_pwr_cost = cost;
+ return 0;
+}
+
+unsigned int sched_get_static_cluster_pwr_cost(int cpu)
+{
+ return cpu_rq(cpu)->static_cluster_pwr_cost;
+}
+
#else
static inline int got_boost_kick(void)
@@ -9398,6 +9424,9 @@ void __init sched_init(void)
rq->cur_irqload = 0;
rq->avg_irqload = 0;
rq->irqload_ts = 0;
+ rq->static_cpu_pwr_cost = 0;
+ rq->static_cluster_pwr_cost = 0;
+
#ifdef CONFIG_SCHED_FREQ_INPUT
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
rq->old_busy_time = 0;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 5261ee0c35b9..b2bb6caa6a5b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -324,6 +324,8 @@ do { \
P(efficiency);
P(cur_freq);
P(max_freq);
+ P(static_cpu_pwr_cost);
+ P(static_cluster_pwr_cost);
#endif
#ifdef CONFIG_SCHED_HMP
P(hmp_stats.nr_big_tasks);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 27e1a3d7bb05..f009c718fd82 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3091,14 +3091,13 @@ static unsigned int power_cost_at_freq(int cpu, unsigned int freq)
BUG();
}
-/* Return the cost of running task p on CPU cpu. This function
- * currently assumes that task p is the only task which will run on
- * the CPU. */
+/* Return the cost of running the total task load total_load on CPU cpu. */
unsigned int power_cost(u64 total_load, int cpu)
{
unsigned int task_freq;
struct rq *rq = cpu_rq(cpu);
u64 demand;
+ int total_static_pwr_cost = 0;
if (!sysctl_sched_enable_power_aware)
return rq->max_possible_capacity;
@@ -3110,7 +3109,12 @@ unsigned int power_cost(u64 total_load, int cpu)
task_freq = demand * rq->max_possible_freq;
task_freq /= 100; /* khz needed */
- return power_cost_at_freq(cpu, task_freq);
+ if (idle_cpu(cpu) && rq->cstate) {
+ total_static_pwr_cost += rq->static_cpu_pwr_cost;
+ if (rq->dstate)
+ total_static_pwr_cost += rq->static_cluster_pwr_cost;
+ }
+ return power_cost_at_freq(cpu, task_freq) + total_static_pwr_cost;
}
#define UP_MIGRATION 1
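The effect of the fair.c hunk above: the static CPU cost is only charged when the target CPU is idle and in a C-state, and the static cluster cost is only added on top of that when the cluster is also in a D-state. A standalone sketch of just that selection logic, where struct rq_model and dyn_cost are illustrative stand-ins (the real power_cost_at_freq() lookup is not reproduced):

/*
 * Simplified model of the adjusted power_cost() decision.
 * Only the static-cost branch mirrors the patch.
 */
struct rq_model {
	int idle;                            /* idle_cpu(cpu) */
	int cstate;                          /* CPU low-power state, 0 = active */
	int dstate;                          /* cluster low-power state, 0 = active */
	unsigned int static_cpu_pwr_cost;
	unsigned int static_cluster_pwr_cost;
};

static unsigned int power_cost_model(const struct rq_model *rq,
				     unsigned int dyn_cost)
{
	unsigned int static_cost = 0;

	/* Waking an idle CPU out of a C-state pays the per-CPU penalty... */
	if (rq->idle && rq->cstate) {
		static_cost += rq->static_cpu_pwr_cost;
		/* ...and waking the whole cluster out of a D-state pays more. */
		if (rq->dstate)
			static_cost += rq->static_cluster_pwr_cost;
	}
	return dyn_cost + static_cost;
}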
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4380cfacf1da..b38041e3df9b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -690,6 +690,8 @@ struct rq {
u64 cur_irqload;
u64 avg_irqload;
u64 irqload_ts;
+ unsigned int static_cpu_pwr_cost;
+ unsigned int static_cluster_pwr_cost;
#ifdef CONFIG_SCHED_FREQ_INPUT
unsigned int old_busy_time;