summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorOlav Haugan <ohaugan@codeaurora.org>2015-03-03 16:55:56 -0800
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 20:01:57 -0700
commit6e08a8c77afc2bca795b96af124cc23eab1633ea (patch)
tree21f48109d40081735a1873e7a88968064bfb67ae /kernel
parent652e8bc905e20d43d35f13403c89a6589b9d5866 (diff)
sched/fair: Add irq load awareness to the tick CPU selection logic
IRQ load is not taken into account when determining whether a task should be migrated to a different CPU. A task that runs for a long time could get stuck on a CPU with high IRQ load, causing degraded performance. Add IRQ load awareness to the tick CPU selection logic.

CRs-fixed: 809119
Change-Id: I7969f7dd947fb5d66fce0bedbc212bfb2d42c8c1
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c | 28
1 file changed, 18 insertions, 10 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6da8a188ee30..868211061673 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3334,9 +3334,10 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
return best_fallback_cpu;
}
-#define MOVE_TO_BIG_CPU 1
-#define MOVE_TO_LITTLE_CPU 2
-#define MOVE_TO_POWER_EFFICIENT_CPU 3
+#define UP_MIGRATION 1
+#define DOWN_MIGRATION 2
+#define EA_MIGRATION 3
+#define IRQLOAD_MIGRATION 4
static int skip_cpu(struct task_struct *p, int cpu, int reason)
{
@@ -3351,18 +3352,22 @@ static int skip_cpu(struct task_struct *p, int cpu, int reason)
return 1;
switch (reason) {
- case MOVE_TO_BIG_CPU:
+ case UP_MIGRATION:
skip = (rq->capacity <= task_rq->capacity);
break;
- case MOVE_TO_LITTLE_CPU:
+ case DOWN_MIGRATION:
skip = (rq->capacity >= task_rq->capacity);
break;
- case MOVE_TO_POWER_EFFICIENT_CPU:
+ case EA_MIGRATION:
skip = rq->capacity < task_rq->capacity ||
power_cost(p, cpu) > power_cost(p, task_cpu(p));
break;
+
+ case IRQLOAD_MIGRATION:
+ /* Purposely fall through */
+
default:
skip = (cpu == task_cpu(p));
break;
@@ -4133,7 +4138,7 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
if (sched_boost()) {
if (rq->capacity != max_capacity)
- return MOVE_TO_BIG_CPU;
+ return UP_MIGRATION;
return 0;
}
@@ -4141,18 +4146,21 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
if (is_small_task(p))
return 0;
+ if (sched_cpu_high_irqload(cpu_of(rq)))
+ return IRQLOAD_MIGRATION;
+
if ((nice > sched_upmigrate_min_nice || upmigrate_discouraged(p)) &&
rq->capacity > min_capacity)
- return MOVE_TO_LITTLE_CPU;
+ return DOWN_MIGRATION;
if (!task_will_fit(p, cpu_of(rq)))
- return MOVE_TO_BIG_CPU;
+ return UP_MIGRATION;
if (sysctl_sched_enable_power_aware &&
!is_task_migration_throttled(p) &&
is_cpu_throttling_imminent(cpu_of(rq)) &&
lower_power_cpu_available(p, cpu_of(rq)))
- return MOVE_TO_POWER_EFFICIENT_CPU;
+ return EA_MIGRATION;
return 0;
}