author    Steve Muckle <smuckle@codeaurora.org>   2014-11-13 14:58:10 -0800
committer David Keitel <dkeitel@codeaurora.org>   2016-03-23 20:01:09 -0700
commit    d3abb1dd6b3e077782117dbb2384cc256c03e390
tree      a8a8246fb1c7d9a21cb8f6d04bd0b859c017e821 /kernel
parent    4006da6ec4666e8e2f8d590564a04da26a534860
sched: avoid CPUs with high irq activity
CPUs with significant IRQ activity will not be able to serve tasks
quickly. Avoid them if possible by disqualifying such CPUs from being
recognized as mostly idle.

Change-Id: I2c09272a4f259f0283b272455147d288fce11982
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
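In effect, every placement path in this patch gates on a per-CPU IRQ-load
check before treating a CPU as a good target. A minimal standalone sketch
of that gating idea (hypothetical struct and thresholds, not the kernel
implementation):

	#include <stdio.h>

	/* Hypothetical per-CPU state mirroring the fields the patch reads. */
	struct cpu_stat {
		unsigned long long load;	/* tracked busy time */
		unsigned int nr_running;	/* runnable tasks */
		unsigned long long avg_irqload;	/* recent IRQ time, ns */
	};

	/* Illustrative thresholds; only the 10ms figure comes from the patch. */
	#define MOSTLY_IDLE_LOAD	100
	#define MOSTLY_IDLE_NR_RUN	3
	#define HIGH_IRQ_NS		(10ULL * 1000 * 1000)

	static int cpu_high_irqload(const struct cpu_stat *c)
	{
		return c->avg_irqload >= HIGH_IRQ_NS;
	}

	/* A CPU only counts as "mostly idle" if it is lightly loaded,
	 * has few runnable tasks, AND is not busy servicing interrupts. */
	static int mostly_idle(const struct cpu_stat *c)
	{
		return c->load <= MOSTLY_IDLE_LOAD
			&& c->nr_running <= MOSTLY_IDLE_NR_RUN
			&& !cpu_high_irqload(c);
	}

	int main(void)
	{
		struct cpu_stat quiet     = { 50, 1,  1000000 };	/* 1ms IRQ */
		struct cpu_stat irq_heavy = { 50, 1, 20000000 };	/* 20ms IRQ */

		/* Prints "quiet: 1, irq_heavy: 0": equal load and run queue
		 * depth, but the IRQ-heavy CPU is disqualified. */
		printf("quiet: %d, irq_heavy: %d\n",
		       mostly_idle(&quiet), mostly_idle(&irq_heavy));
		return 0;
	}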
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  | 12
-rw-r--r--  kernel/sched/rt.c    |  2
-rw-r--r--  kernel/sched/sched.h | 23
3 files changed, 29 insertions(+), 8 deletions(-)
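The sched.h hunk below introduces the helpers that implement the check. The
subtlety worth noting is the staleness window: an IRQ-load sample only
disqualifies a CPU while the last accounting update is within
SCHED_HIGH_IRQ_TIMEOUT jiffies, so a CPU that was interrupt-heavy a while
ago is not penalized forever. A rough userspace mock of that windowing
(names and values illustrative only; the kernel keeps this state on
struct rq):

	#include <stdio.h>

	#define SCHED_HIGH_IRQ_TIMEOUT	3	/* jiffies, as in the patch */
	#define SCHED_HIGH_IRQ_NS	(10ULL * 1000 * 1000)	/* 10ms */

	/* Mocked per-CPU accounting state. */
	static unsigned long long avg_irqload = 20ULL * 1000 * 1000; /* 20ms */
	static unsigned long long irqload_ts;	/* time of last update */

	/* Stale samples decay to zero: outside the window the CPU is
	 * treated as having no recent IRQ activity at all. */
	static unsigned long long irqload(unsigned long long now_jiffies)
	{
		if (now_jiffies - irqload_ts < SCHED_HIGH_IRQ_TIMEOUT)
			return avg_irqload;
		return 0;
	}

	int main(void)
	{
		irqload_ts = 100;
		/* Fresh sample: 20ms >= 10ms, CPU is disqualified (1). */
		printf("fresh: high=%d\n", irqload(101) >= SCHED_HIGH_IRQ_NS);
		/* Stale sample: window expired, CPU qualifies again (0). */
		printf("stale: high=%d\n", irqload(110) >= SCHED_HIGH_IRQ_NS);
		return 0;
	}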
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6c7d2e4d5e90..561c3fc1ab55 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2809,12 +2809,10 @@ spill_threshold_crossed(struct task_struct *p, struct rq *rq, int cpu)
int mostly_idle_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-	int mostly_idle;

-	mostly_idle = (cpu_load(cpu) <= rq->mostly_idle_load
-			&& rq->nr_running <= rq->mostly_idle_nr_run);
-
-	return mostly_idle;
+	return cpu_load(cpu) <= rq->mostly_idle_load
+		&& rq->nr_running <= rq->mostly_idle_nr_run
+		&& !sched_cpu_high_irqload(cpu);
}

static int boost_refcount;
@@ -3066,7 +3064,7 @@ static int best_small_task_cpu(struct task_struct *p)
continue;
}
- if (idle_cpu(i) && cstate) {
+ if (idle_cpu(i) && cstate && !sched_cpu_high_irqload(i)) {
if (cstate < min_cstate) {
min_cstate_cpu = i;
min_cstate = cstate;
@@ -3154,7 +3152,7 @@ static int select_packing_target(struct task_struct *p, int best_cpu)
for_each_cpu(i, &search_cpus) {
int cost = power_cost(p, i);
- if (cost < min_cost) {
+ if (cost < min_cost && !sched_cpu_high_irqload(i)) {
target = i;
min_cost = cost;
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f2f9b92f75cb..a9c347291f5e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1663,7 +1663,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
if (sched_boost() && capacity(rq) != max_capacity)
continue;
- if (cpu_cost < min_cost) {
+ if (cpu_cost < min_cost && !sched_cpu_high_irqload(i)) {
min_cost = cpu_cost;
best_cpu = i;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6a117028ca97..cb0de7630d8b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1031,6 +1031,27 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
#define real_to_pct(tunable) \
(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
+#define SCHED_HIGH_IRQ_TIMEOUT 3
+static inline u64 sched_irqload(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ s64 delta;
+
+ delta = get_jiffies_64() - rq->irqload_ts;
+ BUG_ON(delta < 0);
+
+ if (delta < SCHED_HIGH_IRQ_TIMEOUT)
+ return rq->avg_irqload;
+ else
+ return 0;
+}
+
+#define SCHED_HIGH_IRQ_NS (10 * NSEC_PER_MSEC)
+static inline int sched_cpu_high_irqload(int cpu)
+{
+ return sched_irqload(cpu) >= SCHED_HIGH_IRQ_NS;
+}
+
#else /* CONFIG_SCHED_HMP */
static inline int pct_task_load(struct task_struct *p) { return 0; }
@@ -1065,6 +1086,8 @@ static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
{
}
+static inline int sched_cpu_high_irqload(int cpu) { return 0; }
+
#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_SCHED_FREQ_INPUT