| author | Srinath Sridharan <srinathsr@google.com> | 2016-07-22 13:21:15 +0100 |
|---|---|---|
| committer | Amit Pundir <amit.pundir@linaro.org> | 2016-09-14 15:02:22 +0530 |
| commit | cf8449f421c99c6482c5b8ef26858dc5aa206628 | |
| tree | 52939fbe00cf19d8751bb16e9296958ba6a3aa5c /kernel/sched/cputime.c | |
| parent | b41fa2aec51a031e8b53486966e885116c314579 | |
sched/walt: Accounting for number of irqs pending on each core
Schedule tasks on a core whose IRQ count is below a threshold.
This improves the I/O performance of EAS.
Change-Id: I08ff7dd0d22502a0106fc636b1af2e6fe9e758b5
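
The task-placement change itself lives in the WALT/EAS code and is not part of this diff; this commit only adds the per-CPU IRQ-time accounting hook that such a policy can consult. As a rough illustration of the stated idea (prefer cores whose recent IRQ activity is below a cutoff), the sketch below uses a hypothetical per-CPU counter, threshold, and `pick_low_irq_cpu()` helper; none of these names or values come from the kernel.

```c
#include <stdbool.h>

#define NR_CPUS             8
#define IRQ_COUNT_THRESHOLD 16   /* assumed cutoff, not a kernel constant */

/* Hypothetical per-CPU count of IRQs handled in the current window. */
static unsigned int irq_count[NR_CPUS];

/*
 * Prefer the first candidate CPU whose recent IRQ count is below the
 * threshold; otherwise fall back to the candidate with the fewest IRQs.
 */
static int pick_low_irq_cpu(const bool *candidate_mask)
{
	unsigned int best_count = (unsigned int)-1;
	int cpu, best = -1;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!candidate_mask[cpu])
			continue;
		if (irq_count[cpu] < IRQ_COUNT_THRESHOLD)
			return cpu;             /* acceptable core found */
		if (irq_count[cpu] < best_count) {
			best_count = irq_count[cpu];
			best = cpu;             /* least-IRQ-loaded fallback */
		}
	}
	return best;                            /* -1 if no candidate was set */
}
```

In the actual patch series the equivalent decision is presumably folded into the WALT/EAS CPU-selection path rather than implemented as a standalone helper like this.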
Diffstat (limited to 'kernel/sched/cputime.c')
| -rw-r--r-- | kernel/sched/cputime.c | 16 |
1 file changed, 16 insertions, 0 deletions
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index f74ea89e77a8..3f232c8b2bdd 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -5,6 +5,7 @@
 #include <linux/static_key.h>
 #include <linux/context_tracking.h>
 #include "sched.h"
+#include "walt.h"
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 
@@ -49,6 +50,10 @@ void irqtime_account_irq(struct task_struct *curr)
 	unsigned long flags;
 	s64 delta;
 	int cpu;
+#ifdef CONFIG_SCHED_WALT
+	u64 wallclock;
+	bool account = true;
+#endif
 
 	if (!sched_clock_irqtime)
 		return;
@@ -56,6 +61,9 @@ void irqtime_account_irq(struct task_struct *curr)
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
+#ifdef CONFIG_SCHED_WALT
+	wallclock = sched_clock_cpu(cpu);
+#endif
 	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
 	__this_cpu_add(irq_start_time, delta);
 
@@ -70,8 +78,16 @@ void irqtime_account_irq(struct task_struct *curr)
 		__this_cpu_add(cpu_hardirq_time, delta);
 	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
 		__this_cpu_add(cpu_softirq_time, delta);
+#ifdef CONFIG_SCHED_WALT
+	else
+		account = false;
+#endif
 
 	irq_time_write_end();
+#ifdef CONFIG_SCHED_WALT
+	if (account)
+		walt_account_irqtime(cpu, curr, delta, wallclock);
+#endif
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
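
The walt.c side is not shown in this commit, so the exact behaviour of `walt_account_irqtime()` is not visible here. The sketch below is only a model of what such a hook could do with the cpu, delta, and wallclock arguments: fold the just-measured IRQ time into a per-CPU window that rolls over when the wallclock crosses the window boundary. The window length, struct layout, and function name are assumptions, and the real helper also receives the current task, which is omitted here.

```c
#include <stdint.h>

typedef uint64_t u64;

#define WALT_WINDOW_NS (20 * 1000 * 1000ULL)   /* assumed window size, 20 ms */
#define MAX_CPUS 8                             /* assumed CPU count */

/* Hypothetical per-CPU WALT state; in the kernel this would live on struct rq. */
struct walt_irq_stats {
	u64 window_start;   /* wallclock at the start of the current window */
	u64 irq_ns;         /* IRQ/softirq time accumulated in this window */
};

static struct walt_irq_stats walt_stats[MAX_CPUS];

/*
 * Model of the call made at the end of irqtime_account_irq(): accumulate the
 * just-measured IRQ delta into the current per-CPU window, starting a new
 * window once the wallclock passes the boundary.
 */
static void walt_account_irqtime_sketch(int cpu, u64 delta_ns, u64 wallclock)
{
	struct walt_irq_stats *ws = &walt_stats[cpu];

	if (wallclock - ws->window_start >= WALT_WINDOW_NS) {
		ws->window_start = wallclock;   /* roll over to a new window */
		ws->irq_ns = 0;
	}
	ws->irq_ns += delta_ns;
}
```

This also shows why the diff records `wallclock` before updating the per-CPU IRQ timestamps and only calls the hook when the delta was actually charged to hardirq or softirq time (the `account` flag): the callee needs both the charged duration and a consistent notion of "now" for its windowing.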
