-rw-r--r--   include/linux/sched.h    1
-rw-r--r--   kernel/sched/core.c     53
-rw-r--r--   kernel/sched/cputime.c  12
-rw-r--r--   kernel/sched/sched.h     7
4 files changed, 53 insertions(+), 20 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 53e36caed735..67b849a7df2c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -320,6 +320,7 @@ enum task_event {
TASK_WAKE = 2,
TASK_MIGRATE = 3,
TASK_UPDATE = 4,
+ IRQ_UPDATE = 5,
};
#include <linux/spinlock.h>
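Note: the IRQ_UPDATE enumerator added above is used as an index into
task_event_names[] (extended in the core.c hunk below), so the enum and the
array must grow in lockstep. A minimal compile-time guard for that invariant
might look like the following, using the kernel's BUILD_BUG_ON/ARRAY_SIZE
helpers (an illustrative addition, not part of this patch):

	/* At function scope somewhere in core.c: */
	BUILD_BUG_ON(ARRAY_SIZE(task_event_names) != IRQ_UPDATE + 1);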
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dc8d70609c87..55ec953fb4cb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -92,7 +92,8 @@
#include <trace/events/sched.h>
const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
- "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE"};
+ "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
+ "IRQ_UPDATE"};
ATOMIC_NOTIFIER_HEAD(migration_notifier_head);
ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
@@ -1434,7 +1435,7 @@ static inline int add_task_demand(int event, struct task_struct *p,
}
static void update_task_ravg(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, int *long_sleep)
+ int event, u64 wallclock, int *long_sleep, u64 irqtime)
{
u32 window_size = sched_ravg_window;
int update_sum, new_window;
@@ -1455,9 +1456,13 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
window_start = rq->window_start;
if (is_idle_task(p)) {
- if (!(event == PUT_PREV_TASK && cpu_is_waiting_on_io(rq)))
+ if (!irqtime && !(event == PUT_PREV_TASK &&
+ cpu_is_waiting_on_io(rq)))
goto done;
+ if (irqtime && !cpu_is_waiting_on_io(rq))
+ mark_start = wallclock - irqtime;
+
if (window_start > mark_start) {
delta = window_start - mark_start;
if (delta > window_size) {
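The idle-task path above is the heart of the change: an interrupt of length
irqtime that ended at wallclock is treated as busy time by backdating
mark_start to wallclock - irqtime (the backdating is skipped when the CPU is
waiting on I/O, which the existing PUT_PREV_TASK io-wait path already
accounts), and the pre-existing rollover logic then splits that interval
across window boundaries. A standalone sketch of the arithmetic, using the
patch's own names (irq_busy_split() is a hypothetical helper written only to
isolate the math; deltas longer than a full window take the
delta > window_size branch shown above):

	static void irq_busy_split(u64 wallclock, u64 irqtime, u64 window_start,
				   u64 *prev_contrib, u64 *curr_contrib)
	{
		u64 mark_start = wallclock - irqtime;

		if (window_start > mark_start) {
			/* The IRQ straddled a window rollover: split it. */
			*prev_contrib = window_start - mark_start;
			*curr_contrib = wallclock - window_start;
		} else {
			/* The IRQ fits entirely in the current window. */
			*prev_contrib = 0;
			*curr_contrib = wallclock - mark_start;
		}
	}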
@@ -1563,6 +1568,20 @@ done:
p->ravg.mark_start = wallclock;
}
+void sched_account_irqtime(int cpu, struct task_struct *curr,
+ u64 delta, u64 wallclock)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ if (!is_idle_task(curr))
+ return;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, NULL, delta);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
unsigned long __weak arch_get_cpu_efficiency(int cpu)
{
return SCHED_LOAD_SCALE;
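Two points about the sched_account_irqtime() addition above are worth noting.
The !is_idle_task(curr) early return means IRQ time only reaches the window
statistics when the interrupt preempted the idle task; time spent in an
interrupt that landed on a running task already falls inside that task's own
mark_start..wallclock interval and would otherwise be counted twice. And
rq->lock is taken with irqsave, so the helper is safe from any calling
context on the IRQ accounting path (see the cputime.c hunk below). A
hypothetical call site, with made-up numbers:

	/* A 200us hardirq on this CPU that ended at `now`. */
	u64 now = sched_clock_cpu(cpu);

	sched_account_irqtime(cpu, curr, 200 * NSEC_PER_USEC, now);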
@@ -1605,7 +1624,7 @@ static inline void mark_task_starting(struct task_struct *p)
return;
}
- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
p->ravg.mark_start = wallclock;
rq->prev_runnable_sum += p->ravg.demand;
rq->curr_runnable_sum += p->ravg.partial_demand;
@@ -1654,7 +1673,7 @@ unsigned long sched_get_busy(int cpu)
* that the window stats are current by doing an update.
*/
raw_spin_lock_irqsave(&rq->lock, flags);
- update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
raw_spin_unlock_irqrestore(&rq->lock, flags);
return div64_u64(scale_load_to_cpu(rq->prev_runnable_sum, cpu),
@@ -1909,7 +1928,7 @@ static int cpufreq_notifier_trans(struct notifier_block *nb,
BUG_ON(!new_freq);
raw_spin_lock_irqsave(&rq->lock, flags);
- update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
cpu_rq(cpu)->cur_freq = new_freq;
raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -1962,9 +1981,9 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
update_task_ravg(task_rq(p)->curr, task_rq(p),
TASK_UPDATE,
- wallclock, NULL);
+ wallclock, NULL, 0);
update_task_ravg(dest_rq->curr, dest_rq,
- TASK_UPDATE, wallclock, NULL);
+ TASK_UPDATE, wallclock, NULL, 0);
/*
* In case of migration of task on runqueue, on_rq = 1,
@@ -1981,7 +2000,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
}
update_task_ravg(p, task_rq(p), TASK_MIGRATE,
- wallclock, NULL);
+ wallclock, NULL, 0);
/*
* Remove the task's load from rq as it's now migrating to
@@ -2036,7 +2055,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
static inline void
update_task_ravg(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, int *long_sleep)
+ int event, u64 wallclock, int *long_sleep, u64 irqtime)
{
}
@@ -3030,8 +3049,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
raw_spin_lock(&rq->lock);
wallclock = sched_clock();
- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL);
- update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
+ update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep, 0);
raw_spin_unlock(&rq->lock);
p->sched_contributes_to_load = !!task_contributes_to_load(p);
@@ -3132,8 +3151,8 @@ static void try_to_wake_up_local(struct task_struct *p)
if (!task_on_rq_queued(p)) {
u64 wallclock = sched_clock();
- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL);
- update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
+ update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep, 0);
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
}
@@ -3979,7 +3998,7 @@ void scheduler_tick(void)
curr->sched_class->task_tick(rq, curr, 0);
update_cpu_load_active(rq);
calc_global_load_tick(rq);
- update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
raw_spin_unlock(&rq->lock);
perf_event_task_tick();
@@ -4280,8 +4299,8 @@ static void __sched notrace __schedule(bool preempt)
next = pick_next_task(rq, prev);
wallclock = sched_clock();
- update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, NULL);
- update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, NULL);
+ update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, NULL, 0);
+ update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, NULL, 0);
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
rq->clock_skip_update = 0;
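All pre-existing update_task_ravg() call sites in the hunks above pass
irqtime = 0, so this patch changes no behavior on the ordinary scheduling
paths; the new accounting only engages through the IRQ_UPDATE path entered
via sched_account_irqtime().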
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 05de80b48586..ec3e99076941 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -49,6 +49,7 @@ void irqtime_account_irq(struct task_struct *curr)
unsigned long flags;
s64 delta;
int cpu;
+ u64 wallclock;
if (!sched_clock_irqtime)
return;
@@ -56,7 +57,8 @@ void irqtime_account_irq(struct task_struct *curr)
local_irq_save(flags);
cpu = smp_processor_id();
- delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
+ wallclock = sched_clock_cpu(cpu);
+ delta = wallclock - __this_cpu_read(irq_start_time);
__this_cpu_add(irq_start_time, delta);
irq_time_write_begin();
@@ -66,10 +68,14 @@ void irqtime_account_irq(struct task_struct *curr)
* in that case, so as not to confuse the scheduler with a special task
* that does not consume any time but still wants to run.
*/
- if (hardirq_count())
+ if (hardirq_count()) {
__this_cpu_add(cpu_hardirq_time, delta);
- else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
+ sched_account_irqtime(cpu, curr, delta, wallclock);
+ } else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
__this_cpu_add(cpu_softirq_time, delta);
+ sched_account_irqtime(cpu, curr, delta, wallclock);
+ }
+
irq_time_write_end();
local_irq_restore(flags);
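In the cputime.c hunk, sched_clock_cpu() is now read once into wallclock so
that the irqtime delta and the window update refer to the same instant;
recomputing the clock inside sched_account_irqtime() would let
wallclock - irqtime drift away from the true start of the interrupt.
Condensed, the per-IRQ flow after this patch looks like this (a sketch of the
patched irqtime_account_irq(), not verbatim):

	wallclock = sched_clock_cpu(cpu);
	delta = wallclock - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	if (hardirq_count()) {
		__this_cpu_add(cpu_hardirq_time, delta);	/* existing stats */
		sched_account_irqtime(cpu, curr, delta, wallclock);
	} else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
		__this_cpu_add(cpu_softirq_time, delta);
		sched_account_irqtime(cpu, curr, delta, wallclock);
	}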
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 805b8ebd7d7f..3369d8709835 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -967,6 +967,8 @@ static inline u64 scale_load_to_cpu(u64 load, int cpu)
}
#endif
unsigned int max_task_load(void);
+extern void sched_account_irqtime(int cpu, struct task_struct *curr,
+ u64 delta, u64 wallclock);
static inline void
inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
@@ -1013,6 +1015,11 @@ static inline unsigned long capacity_scale_cpu_freq(int cpu)
return SCHED_LOAD_SCALE;
}
+static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
+ u64 delta, u64 wallclock)
+{
+}
+
#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#ifdef CONFIG_SCHED_HMP
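Finally, the two sched.h hunks pair the extern declaration with an empty
static-inline stub so that cputime.c can call sched_account_irqtime()
unconditionally; without CONFIG_SCHED_FREQ_INPUT or CONFIG_SCHED_HMP the call
compiles to nothing. Condensed, the shape the two hunks produce (exact
placement follows the surrounding guards in sched.h):

	#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
	extern void sched_account_irqtime(int cpu, struct task_struct *curr,
					  u64 delta, u64 wallclock);
	#else
	static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
						 u64 delta, u64 wallclock)
	{
	}
	#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */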