Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c   28
-rw-r--r--  kernel/sched/hmp.c     7
-rw-r--r--  kernel/sched/tune.c    2
3 files changed, 21 insertions, 16 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df23b0365527..7fdf1efa9086 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2619,7 +2619,7 @@ struct cluster_cpu_stats {
int best_idle_cpu, least_loaded_cpu;
int best_capacity_cpu, best_cpu, best_sibling_cpu;
int min_cost, best_sibling_cpu_cost;
- int best_cpu_cstate;
+ int best_cpu_wakeup_latency;
u64 min_load, best_load, best_sibling_cpu_load;
s64 highest_spare_capacity;
};
@@ -2827,19 +2827,19 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
struct cpu_select_env *env, int cpu_cost)
{
- int cpu_cstate;
+ int wakeup_latency;
int prev_cpu = env->prev_cpu;
- cpu_cstate = cpu_rq(cpu)->cstate;
+ wakeup_latency = cpu_rq(cpu)->wakeup_latency;
if (env->need_idle) {
stats->min_cost = cpu_cost;
if (idle_cpu(cpu)) {
- if (cpu_cstate < stats->best_cpu_cstate ||
- (cpu_cstate == stats->best_cpu_cstate &&
- cpu == prev_cpu)) {
+ if (wakeup_latency < stats->best_cpu_wakeup_latency ||
+ (wakeup_latency == stats->best_cpu_wakeup_latency &&
+ cpu == prev_cpu)) {
stats->best_idle_cpu = cpu;
- stats->best_cpu_cstate = cpu_cstate;
+ stats->best_cpu_wakeup_latency = wakeup_latency;
}
} else {
if (env->cpu_load < stats->min_load ||
@@ -2855,7 +2855,7 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
if (cpu_cost < stats->min_cost) {
stats->min_cost = cpu_cost;
- stats->best_cpu_cstate = cpu_cstate;
+ stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CPU_COST;
@@ -2864,11 +2864,11 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
/* CPU cost is the same. Start breaking the tie by C-state */
- if (cpu_cstate > stats->best_cpu_cstate)
+ if (wakeup_latency > stats->best_cpu_wakeup_latency)
return;
- if (cpu_cstate < stats->best_cpu_cstate) {
- stats->best_cpu_cstate = cpu_cstate;
+ if (wakeup_latency < stats->best_cpu_wakeup_latency) {
+ stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
@@ -2883,8 +2883,8 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
}
if (stats->best_cpu != prev_cpu &&
- ((cpu_cstate == 0 && env->cpu_load < stats->best_load) ||
- (cpu_cstate > 0 && env->cpu_load > stats->best_load))) {
+ ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
+ (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
@@ -2979,7 +2979,7 @@ static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
stats->highest_spare_capacity = 0;
stats->least_loaded_cpu = -1;
- stats->best_cpu_cstate = INT_MAX;
+ stats->best_cpu_wakeup_latency = INT_MAX;
/* No need to initialize stats->best_load */
}
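The fair.c hunks above replace the raw C-state index with the CPU's wakeup latency as the tie-breaker in power-aware CPU selection: a cheaper CPU always wins, equal costs prefer the lower wakeup latency, and a latency tie falls back to a load comparison (a zero-latency, i.e. active, CPU prefers the lower load; a nonzero-latency one the higher load). Below is a minimal standalone sketch of that ordering, outside the kernel and with illustrative names (struct cpu_info, pick_better()); the prev-CPU bias and the need-idle path are omitted.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's per-candidate and best-so-far data. */
    struct cpu_info {
            int cost;                       /* power cost of running on this CPU */
            int wakeup_latency;             /* quantized C-state exit latency, in us */
            unsigned long long load;
    };

    struct best_stats {
            int min_cost;
            int best_cpu_wakeup_latency;
            unsigned long long best_load;
    };

    /* Return true if the candidate should replace the current best choice. */
    static bool pick_better(const struct cpu_info *c, const struct best_stats *s)
    {
            if (c->cost != s->min_cost)
                    return c->cost < s->min_cost;   /* cheaper CPU always wins */

            /* Cost tie: prefer the CPU with the lower wakeup latency. */
            if (c->wakeup_latency != s->best_cpu_wakeup_latency)
                    return c->wakeup_latency < s->best_cpu_wakeup_latency;

            /*
             * Latency tie: an active CPU (zero latency) prefers less load,
             * an idle CPU (nonzero latency) prefers more.
             */
            if (c->wakeup_latency == 0)
                    return c->load < s->best_load;
            return c->load > s->best_load;
    }

    int main(void)
    {
            struct best_stats best = { .min_cost = 100,
                                       .best_cpu_wakeup_latency = 128,
                                       .best_load = 500 };
            struct cpu_info cand = { .cost = 100, .wakeup_latency = 64, .load = 700 };

            /* Same cost, shallower C-state: the candidate wins the tie-break. */
            printf("candidate better: %d\n", pick_better(&cand, &best));
            return 0;
    }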
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index a0686ea29243..3d5de8ba70a2 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -24,6 +24,8 @@
#include <trace/events/sched.h>
+#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
+
const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
"TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
"IRQ_UPDATE"};
@@ -99,7 +101,10 @@ sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
rq->cstate = cstate; /* C1, C2 etc */
rq->wakeup_energy = wakeup_energy;
- rq->wakeup_latency = wakeup_latency;
+ /* disregard small latency delta (64 us). */
+ rq->wakeup_latency = ((wakeup_latency >>
+ CSTATE_LATENCY_GRANULARITY_SHIFT) <<
+ CSTATE_LATENCY_GRANULARITY_SHIFT);
}
/*
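The hmp.c hunk above rounds the reported wakeup latency down to 64 us steps before storing it, so CPUs whose exit latencies differ by less than one step compare as equal in the tie-break shown earlier. A standalone sketch of that rounding (plain C, outside the kernel; quantize_wakeup_latency() is an illustrative name):

    #include <stdio.h>

    #define CSTATE_LATENCY_GRANULARITY_SHIFT (6)    /* 1 << 6 = 64 us buckets */

    /* Round a wakeup latency (in us) down to the nearest 64 us step. */
    static int quantize_wakeup_latency(int wakeup_latency)
    {
            return (wakeup_latency >> CSTATE_LATENCY_GRANULARITY_SHIFT) <<
                    CSTATE_LATENCY_GRANULARITY_SHIFT;
    }

    int main(void)
    {
            /* 100 us and 120 us land in the same 64 us bucket; 200 us rounds to 192. */
            printf("%d %d %d\n",
                   quantize_wakeup_latency(100),    /* 64 */
                   quantize_wakeup_latency(120),    /* 64 */
                   quantize_wakeup_latency(200));   /* 192 */
            return 0;
    }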
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 3c964d6d3856..4f8182302e5e 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -29,7 +29,7 @@ struct schedtune {
static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
{
- return css ? container_of(css, struct schedtune, css) : NULL;
+ return container_of(css, struct schedtune, css);
}
static inline struct schedtune *task_schedtune(struct task_struct *tsk)