Diffstat (limited to 'kernel/sched/cputime.c')
-rw-r--r--	kernel/sched/cputime.c	| 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 05de80b48586..ec3e99076941 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -49,6 +49,7 @@ void irqtime_account_irq(struct task_struct *curr)
 	unsigned long flags;
 	s64 delta;
 	int cpu;
+	u64 wallclock;
 
 	if (!sched_clock_irqtime)
 		return;
@@ -56,7 +57,8 @@ void irqtime_account_irq(struct task_struct *curr)
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
-	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
+	wallclock = sched_clock_cpu(cpu);
+	delta = wallclock - __this_cpu_read(irq_start_time);
 	__this_cpu_add(irq_start_time, delta);
 
 	irq_time_write_begin();
@@ -66,10 +68,14 @@ void irqtime_account_irq(struct task_struct *curr)
 	 * in that case, so as not to confuse scheduler with a special task
 	 * that do not consume any time, but still wants to run.
 	 */
-	if (hardirq_count())
+	if (hardirq_count()) {
 		__this_cpu_add(cpu_hardirq_time, delta);
-	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
+		sched_account_irqtime(cpu, curr, delta, wallclock);
+	} else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
 		__this_cpu_add(cpu_softirq_time, delta);
+		sched_account_irqtime(cpu, curr, delta, wallclock);
+	}
+
 
 	irq_time_write_end();
 	local_irq_restore(flags);
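
Note on the pattern above: the patch reads sched_clock_cpu() once, keeps the value in wallclock, and hands both the elapsed delta and that timestamp to sched_account_irqtime(), whose definition is elsewhere in this tree and not part of this diff. The sketch below is a minimal, standalone userspace illustration of the same accounting idea: read the clock once per event, charge "now - interval start" to the hard-IRQ or soft-IRQ bucket, and advance the per-CPU interval start. All names here (irqtime_demo, demo_account_irq, DEMO_*) are illustrative and are not kernel APIs.

/* irqtime_sketch.c -- illustrative only; not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct irqtime_demo {
	uint64_t irq_start_time;	/* start of the current interval */
	uint64_t hardirq_time;		/* total charged to hard-IRQ context */
	uint64_t softirq_time;		/* total charged to soft-IRQ context */
};

enum demo_ctx { DEMO_HARDIRQ, DEMO_SOFTIRQ, DEMO_OTHER };

/*
 * Same shape as the hunk above: compute delta from a single clock read,
 * advance irq_start_time, then charge delta to the matching bucket.
 */
static void demo_account_irq(struct irqtime_demo *it, uint64_t wallclock,
			     enum demo_ctx ctx)
{
	uint64_t delta = wallclock - it->irq_start_time;

	it->irq_start_time += delta;	/* now equal to wallclock */

	if (ctx == DEMO_HARDIRQ)
		it->hardirq_time += delta;
	else if (ctx == DEMO_SOFTIRQ)
		it->softirq_time += delta;
	/* DEMO_OTHER: the interval is not charged to either IRQ bucket */
}

int main(void)
{
	struct irqtime_demo it = { .irq_start_time = 1000 };

	demo_account_irq(&it, 1300, DEMO_HARDIRQ);	/* 300 units of hard IRQ */
	demo_account_irq(&it, 1450, DEMO_OTHER);	/* 150 units of task time */
	demo_account_irq(&it, 1500, DEMO_SOFTIRQ);	/*  50 units of soft IRQ */

	printf("hardirq=%llu softirq=%llu\n",
	       (unsigned long long)it.hardirq_time,
	       (unsigned long long)it.softirq_time);
	return 0;
}

Capturing wallclock once and reusing it (rather than calling the clock again inside the branches) keeps the value passed to the accounting hook consistent with the delta that was just charged.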