author     Morten Rasmussen <morten.rasmussen@arm.com>  2015-01-02 17:08:52 +0000
committer  Leo Yan <leo.yan@linaro.org>  2016-05-10 16:49:51 +0800
commit     5ec8ccabfef3989f4514f22c034a15aeab691110 (patch)
tree       95d8c0ee5d8e54fe820710e51819d41ce0f11de5 /kernel
parent     b6c0399e0f340887f369c6870ba6a865afe0b0ec (diff)
sched: Highest energy aware balancing sched_domain level pointer
Add another member to the family of per-cpu sched_domain shortcut pointers. This one, sd_ea, points to the highest level at which an energy model is provided. At this level and at all levels below, all sched_groups have energy model data attached.

Partial energy model information is possible but is restricted to providing energy model data for the lower sched_domain levels (sd_ea and below), leaving load-balancing at the levels above to non-energy-aware load-balancing. For example, it is possible to apply energy-aware scheduling within each socket of a multi-socket system and let normal scheduling handle load-balancing between sockets.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
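For illustration, a minimal sketch of how scheduler code might consume the new shortcut; the helper and its caller below are hypothetical (only sd_ea itself comes from this patch). The pointer is published with rcu_assign_pointer(), so readers dereference it under rcu_read_lock(), matching how the existing sd_llc/sd_busy shortcuts are used:

        /* Hypothetical consumer: return the highest sched_domain for @cpu
         * at which energy model data is available. Caller must hold the
         * RCU read lock.
         */
        static inline struct sched_domain *energy_aware_domain(int cpu)
        {
                return rcu_dereference(per_cpu(sd_ea, cpu));
        }

        /* Example use in a balancing path: */
        rcu_read_lock();
        sd = energy_aware_domain(cpu);
        if (sd) {
                /* energy-aware balancing applies at this level and below */
        }
        rcu_read_unlock();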
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c   11
-rw-r--r--  kernel/sched/sched.h   1
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2863d223c07e..45e54b0e3669 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5993,11 +5993,12 @@ DEFINE_PER_CPU(int, sd_llc_id);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain *, sd_busy);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+DEFINE_PER_CPU(struct sched_domain *, sd_ea);
 
 static void update_top_cache_domain(int cpu)
 {
 	struct sched_domain *sd;
-	struct sched_domain *busy_sd = NULL;
+	struct sched_domain *busy_sd = NULL, *ea_sd = NULL;
 	int id = cpu;
 	int size = 1;
@@ -6018,6 +6019,14 @@ static void update_top_cache_domain(int cpu)
 	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
 	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
+
+	for_each_domain(cpu, sd) {
+		if (sd->groups->sge)
+			ea_sd = sd;
+		else
+			break;
+	}
+	rcu_assign_pointer(per_cpu(sd_ea, cpu), ea_sd);
 }
 
 /*
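The walk relies on for_each_domain() visiting sched_domains bottom-up (lowest level first, following ->parent), so ea_sd ends up at the highest contiguous level whose groups carry energy data (sd->groups->sge), and the loop breaks at the first level without it. A toy, userspace-runnable sketch of how that resolves for the commit message's multi-socket example; every struct here is a mock, and only the walk's logic mirrors the patch:

        #include <stdio.h>

        /* Mock types: just enough shape for the walk. */
        struct sched_group  { const void *sge; };          /* energy data or NULL */
        struct sched_domain {
                const char *name;
                struct sched_domain *parent;               /* next level up */
                struct sched_group *groups;
        };

        /* Mirrors the loop added above: remember the highest contiguous
         * level with energy data, stop at the first level without it.
         */
        static struct sched_domain *find_sd_ea(struct sched_domain *sd)
        {
                struct sched_domain *ea_sd = NULL;

                for (; sd; sd = sd->parent) {
                        if (sd->groups->sge)
                                ea_sd = sd;
                        else
                                break;
                }
                return ea_sd;
        }

        int main(void)
        {
                int dummy_energy;
                struct sched_group mc_grp  = { &dummy_energy }; /* has energy data */
                struct sched_group die_grp = { NULL };          /* no energy data  */
                struct sched_domain die = { "DIE", NULL, &die_grp }; /* cross-socket */
                struct sched_domain mc  = { "MC",  &die, &mc_grp };  /* per-socket   */

                struct sched_domain *ea = find_sd_ea(&mc);
                printf("sd_ea = %s\n", ea ? ea->name : "(none)"); /* prints: MC */
                return 0;
        }

With energy data at the MC level but not at DIE, sd_ea settles on MC: energy-aware decisions then apply within each socket while load-balancing between sockets stays conventional, exactly the split the commit message describes.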
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1813cba2995d..a77516a15ba0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -839,6 +839,7 @@ DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+DECLARE_PER_CPU(struct sched_domain *, sd_ea);
struct sched_group_capacity {
atomic_t ref;