-rw-r--r--   Documentation/scheduler/sched-hmp.txt    7
-rw-r--r--   kernel/sched/fair.c                     23
2 files changed, 28 insertions, 2 deletions
diff --git a/Documentation/scheduler/sched-hmp.txt b/Documentation/scheduler/sched-hmp.txt
index 62c33b8b45fd..581a9f4d5159 100644
--- a/Documentation/scheduler/sched-hmp.txt
+++ b/Documentation/scheduler/sched-hmp.txt
@@ -819,9 +819,14 @@ in Sec 5.
This is a per-cpu parameter. If non-zero for a cpu which is part of a cluster
and cluster current frequency is less than this threshold, then scheduler will
-poack all tasks on a single cpu in cluster. The cpu chosen is the first most
+pack all tasks on a single cpu in cluster. The cpu chosen is the first most
power-efficient cpu found while scanning cluster's online cpus.
+- PF_WAKE_UP_IDLE
+ Any task that has this flag set in its 'task_struct.flags' field will
+always be woken to an idle cpu. Further, any task woken by such a task will
+also be placed on an idle cpu.
+
For some low band of frequency, spread of task on all available cpus can be
grossly power-inefficient. As an example, consider two tasks that each need
500MHz. Packing them on one cpu could lead to 1GHz. In spread case, we incur
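[Editor's note: a hedged illustration of the documentation added above. An external kernel module opts a task into idle-cpu wakeups by setting PF_WAKE_UP_IDLE in its flags field. The helper name below is hypothetical and not part of this patch; it only sketches the documented usage.]

/*
 * Illustrative sketch only -- not part of this patch. Shows how an external
 * kernel module might set or clear PF_WAKE_UP_IDLE on a task, as described
 * in the documentation hunk above. The helper name is hypothetical; callers
 * would typically apply this to 'current' or otherwise serialize access.
 */
#include <linux/sched.h>

static void set_wake_up_idle(struct task_struct *p, bool enable)
{
	if (enable)
		/* p, and any task p wakes, will be woken to an idle cpu */
		p->flags |= PF_WAKE_UP_IDLE;
	else
		/* revert to default wakeup placement */
		p->flags &= ~PF_WAKE_UP_IDLE;
}
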
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d9d4694d58da..4260cb552d9f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3230,6 +3230,20 @@ static int select_packing_target(struct task_struct *p, int best_cpu)
return target;
}
+/*
+ * Should the task be woken to any available idle cpu?
+ *
+ * Waking tasks to an idle cpu has mixed implications for both performance and
+ * power. In many cases the scheduler cannot correctly estimate the impact of
+ * using idle cpus on either performance or power. PF_WAKE_UP_IDLE lets an
+ * external kernel module pass a strong hint that the task in question should
+ * be woken to an idle cpu, generally to improve performance.
+ */
+static inline int wake_to_idle(struct task_struct *p)
+{
+ return (current->flags & PF_WAKE_UP_IDLE) ||
+ (p->flags & PF_WAKE_UP_IDLE);
+}
/* return cheapest cpu that can fit this task */
static int select_best_cpu(struct task_struct *p, int target, int reason,
@@ -3245,6 +3259,13 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
int cstate, min_cstate = INT_MAX;
int prefer_idle = reason ? 1 : sysctl_sched_prefer_idle;
int curr_cpu = smp_processor_id();
+ int prefer_idle_override = 0;
+
+ if (wake_to_idle(p)) {
+ prefer_idle = 1;
+ prefer_idle_override = 1;
+ small_task = 0;
+ }
trace_sched_task_load(p, small_task, boost, reason, sync, prefer_idle);
@@ -3377,7 +3398,7 @@ done:
best_cpu = fallback_idle_cpu;
}
- if (cpu_rq(best_cpu)->mostly_idle_freq)
+ if (cpu_rq(best_cpu)->mostly_idle_freq && !prefer_idle_override)
best_cpu = select_packing_target(p, best_cpu);
return best_cpu;
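
[Editor's note: a hedged, standalone sketch of the decision this patch adds to select_best_cpu(): if either the waker (current) or the wakee carries PF_WAKE_UP_IDLE, the idle-cpu preference is forced on, the small-task classification is ignored, and the later mostly_idle_freq packing step is skipped. The struct and flag value below are simplified stand-ins, not kernel definitions.]

/*
 * Standalone sketch (userspace-compilable, illustrative only) of the
 * placement policy added above. The struct and flag value are stand-ins,
 * not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PF_WAKE_UP_IDLE 0x00000002	/* illustrative value only */

struct task {
	unsigned int flags;
};

/* Mirrors wake_to_idle(): hint is honored if set on either waker or wakee. */
static bool wake_to_idle(const struct task *waker, const struct task *wakee)
{
	return (waker->flags & PF_WAKE_UP_IDLE) ||
	       (wakee->flags & PF_WAKE_UP_IDLE);
}

int main(void)
{
	struct task waker = { .flags = PF_WAKE_UP_IDLE };
	struct task wakee = { .flags = 0 };
	bool prefer_idle = false;	/* would come from sysctl_sched_prefer_idle */
	bool small_task = true;		/* would come from task load classification */

	if (wake_to_idle(&waker, &wakee)) {
		prefer_idle = true;	/* force idle-cpu preference */
		small_task = false;	/* don't treat as a packable small task */
		/* select_packing_target() is also skipped in this case */
	}

	printf("prefer_idle=%d small_task=%d\n", prefer_idle, small_task);
	return 0;
}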