summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c   29
-rw-r--r--  kernel/sched/debug.c   2
-rw-r--r--  kernel/sched/fair.c   12
-rw-r--r--  kernel/sched/sched.h   2
4 files changed, 41 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1150d8665ac8..7b874fb5ebcd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1168,6 +1168,32 @@ static inline void clear_hmp_request(int cpu)
}
}
+/*
+ * Set the per-CPU static power cost used by the power-aware scheduler
+ * when estimating the cost of waking an idle CPU.
+ *
+ * NOTE(review): @cpu is passed straight to cpu_rq() with no range check —
+ * callers must guarantee it is a valid CPU id. Always returns 0; the int
+ * return exists only for a sysctl/ABI-style handler signature.
+ * Units of @cost are not established by this patch — presumably uW or an
+ * abstract energy-model unit; confirm against the caller.
+ */
+int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ rq->static_cpu_pwr_cost = cost;
+ return 0;
+}
+
+/*
+ * Return the per-CPU static power cost previously set for @cpu
+ * (0 until configured; initialized to 0 in sched_init()).
+ * NOTE(review): no validation of @cpu — caller must pass a valid CPU id.
+ */
+unsigned int sched_get_static_cpu_pwr_cost(int cpu)
+{
+ return cpu_rq(cpu)->static_cpu_pwr_cost;
+}
+
+/*
+ * Set the cluster-level static power cost, stored on @cpu's runqueue.
+ * This is added on top of the per-CPU cost when the whole cluster is in
+ * a low-power state (see the rq->dstate check in power_cost()).
+ *
+ * NOTE(review): stored per-rq, not per-cluster — presumably userspace/the
+ * caller writes the same value to every CPU of a cluster; confirm.
+ * Always returns 0; @cpu is not range-checked.
+ */
+int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ rq->static_cluster_pwr_cost = cost;
+ return 0;
+}
+
+/*
+ * Return the cluster static power cost stored on @cpu's runqueue
+ * (0 until configured; initialized to 0 in sched_init()).
+ * NOTE(review): no validation of @cpu — caller must pass a valid CPU id.
+ */
+unsigned int sched_get_static_cluster_pwr_cost(int cpu)
+{
+ return cpu_rq(cpu)->static_cluster_pwr_cost;
+}
+
#else
static inline int got_boost_kick(void)
@@ -9398,6 +9424,9 @@ void __init sched_init(void)
rq->cur_irqload = 0;
rq->avg_irqload = 0;
rq->irqload_ts = 0;
+ rq->static_cpu_pwr_cost = 0;
+ rq->static_cluster_pwr_cost = 0;
+
#ifdef CONFIG_SCHED_FREQ_INPUT
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
rq->old_busy_time = 0;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 5261ee0c35b9..b2bb6caa6a5b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -324,6 +324,8 @@ do { \
P(efficiency);
P(cur_freq);
P(max_freq);
+ P(static_cpu_pwr_cost);
+ P(static_cluster_pwr_cost);
#endif
#ifdef CONFIG_SCHED_HMP
P(hmp_stats.nr_big_tasks);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 27e1a3d7bb05..f009c718fd82 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3091,14 +3091,13 @@ static unsigned int power_cost_at_freq(int cpu, unsigned int freq)
BUG();
}
-/* Return the cost of running task p on CPU cpu. This function
- * currently assumes that task p is the only task which will run on
- * the CPU. */
+/* Return the cost of running the total task load total_load on CPU cpu. */
unsigned int power_cost(u64 total_load, int cpu)
{
unsigned int task_freq;
struct rq *rq = cpu_rq(cpu);
u64 demand;
+ int total_static_pwr_cost = 0;
if (!sysctl_sched_enable_power_aware)
return rq->max_possible_capacity;
@@ -3110,7 +3109,12 @@ unsigned int power_cost(u64 total_load, int cpu)
task_freq = demand * rq->max_possible_freq;
task_freq /= 100; /* khz needed */
- return power_cost_at_freq(cpu, task_freq);
+ if (idle_cpu(cpu) && rq->cstate) {
+ total_static_pwr_cost += rq->static_cpu_pwr_cost;
+ if (rq->dstate)
+ total_static_pwr_cost += rq->static_cluster_pwr_cost;
+ }
+ return power_cost_at_freq(cpu, task_freq) + total_static_pwr_cost;
}
#define UP_MIGRATION 1
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4380cfacf1da..b38041e3df9b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -690,6 +690,8 @@ struct rq {
u64 cur_irqload;
u64 avg_irqload;
u64 irqload_ts;
+ unsigned int static_cpu_pwr_cost;
+ unsigned int static_cluster_pwr_cost;
#ifdef CONFIG_SCHED_FREQ_INPUT
unsigned int old_busy_time;