author		Olav Haugan <ohaugan@codeaurora.org>	2014-11-24 16:54:59 -0800
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 20:01:19 -0700
commit		5a48aeb06c504302abaec157664a6f3a6205b2cd (patch)
tree		6f475bf8cabcd0ec9569cc3a7a856075b56b31a7 /kernel
parent		72fa561b0dfd807f1f69ffd6a6f63cdf38a5d000 (diff)
sched: Add temperature to cpu_load trace point
Add the current CPU temperature to the sched_cpu_load trace point. This
will allow us to track the CPU temperature.

CRs-Fixed: 764788
Change-Id: Ib2e3559bbbe3fe07a6b7c8115db606828bc36254
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
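Note: the sched_cpu_load event definition itself lives outside kernel/ and is
therefore not part of the diffstat below; its TP_PROTO/TP_ARGS must gain the
matching temp argument. A minimal sketch of what the extended definition could
look like, assuming the event sits in include/trace/events/sched.h; every
field except temp is illustrative and only mirrors the arguments passed at the
call sites in this patch:

TRACE_EVENT(sched_cpu_load,

	TP_PROTO(struct rq *rq, int idle, int mostly_idle,
		 unsigned int irqload, unsigned int power_cost,
		 unsigned int temp),

	TP_ARGS(rq, idle, mostly_idle, irqload, power_cost, temp),

	TP_STRUCT__entry(
		__field(int,		cpu)
		__field(int,		idle)
		__field(int,		mostly_idle)
		__field(unsigned int,	irqload)
		__field(unsigned int,	power_cost)
		__field(unsigned int,	temp)	/* new field added by this patch */
	),

	TP_fast_assign(
		__entry->cpu		= rq->cpu;
		__entry->idle		= idle;
		__entry->mostly_idle	= mostly_idle;
		__entry->irqload	= irqload;
		__entry->power_cost	= power_cost;
		__entry->temp		= temp;	/* value from cpu_temp(i) */
	),

	TP_printk("cpu=%d idle=%d mostly_idle=%d irqload=%u power_cost=%u temp=%u",
		  __entry->cpu, __entry->idle, __entry->mostly_idle,
		  __entry->irqload, __entry->power_cost, __entry->temp)
);

With that in place, each sched_cpu_load record carries the temperature sampled
at trace time, so load, power cost, and temperature can be correlated from a
single event.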
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	25
-rw-r--r--	kernel/sched/rt.c	2
-rw-r--r--	kernel/sched/sched.h	1
3 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 262c8528e775..3204ca2a9619 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3089,7 +3089,8 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
 
 		trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
				     mostly_idle_cpu_sync(i, sync),
-				     sched_irqload(i), power_cost(p, i));
+				     sched_irqload(i), power_cost(p, i),
+				     cpu_temp(i));
 
 		cpu_cost = power_cost(p, i);
 		if (cpu_cost < min_cost) {
@@ -3244,7 +3245,8 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
 
 		trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
				     mostly_idle_cpu_sync(i, sync),
-				     sched_irqload(i), power_cost(p, i));
+				     sched_irqload(i), power_cost(p, i),
+				     cpu_temp(i));
 
 		if (skip_cpu(p, i, reason))
 			continue;
@@ -3731,6 +3733,16 @@ static inline int is_cpu_throttling_imminent(int cpu)
 	return throttling;
 }
 
+unsigned int cpu_temp(int cpu)
+{
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+	if (per_cpu_info)
+		return per_cpu_info[cpu].temp;
+	else
+		return 0;
+}
+
+
 #else /* CONFIG_SCHED_HMP */
 
 #define sched_enable_power_aware 0
@@ -3792,6 +3804,12 @@ static inline int is_cpu_throttling_imminent(int cpu)
 	return 0;
 }
 
+unsigned int cpu_temp(int cpu)
+{
+	return 0;
+}
+
+
 #endif /* CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_SCHED_HMP
@@ -7797,7 +7815,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
				     mostly_idle_cpu(i),
				     sched_irqload(i),
-				     power_cost_at_freq(i, 0));
+				     power_cost_at_freq(i, 0),
+				     cpu_temp(i));
 
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 3d4da64c91f6..f20b6711a1bb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1658,7 +1658,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
 		struct rq *rq = cpu_rq(i);
 
 		cpu_cost = power_cost_at_freq(i, ACCESS_ONCE(rq->min_freq));
 		trace_sched_cpu_load(rq, idle_cpu(i), mostly_idle_cpu(i),
-				     sched_irqload(i), cpu_cost);
+				     sched_irqload(i), cpu_cost, cpu_temp(i));
 		if (sched_boost() && capacity(rq) != max_capacity)
 			continue;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a53383d6bf6c..2307b7cea12c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -998,6 +998,7 @@ extern void fixup_nr_big_small_task(int cpu);
 unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
				 u64 delta, u64 wallclock);
+unsigned int cpu_temp(int cpu);
 
 static inline int capacity(struct rq *rq)
 {
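For reference, the cpu_temp() helper added above leans on the per-CPU power
statistics exported by the MSM power/thermal code, which is also outside this
kernel/-limited diff. A rough sketch of the assumed interface; the real
structure likely carries more fields, and only .temp is read by this patch:

/* Assumed shape of the MSM per-CPU power stats consumed by cpu_temp(). */
struct cpu_pwr_stats {
	int cpu;		/* logical CPU id */
	unsigned int temp;	/* current temperature reading */
};

/*
 * Returns the array indexed by CPU id, or NULL until the thermal
 * driver has registered its data.
 */
struct cpu_pwr_stats *get_cpu_pwr_stats(void);

The NULL check in cpu_temp() therefore matters: early in boot, before the
driver has registered the table, the trace point simply reports a temperature
of 0.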