author		Srivatsa Vaddagiri <vatsa@codeaurora.org>	2014-08-08 18:14:54 +0530
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 20:00:23 -0700
commit		90a01bb623b08e0fac1f7e771cbf23d27cdb1abc (patch)
tree		cdea3438440fdec333d5936d3c829d3e89083a60
parent		9425ce430926fd213d5f65a46d724db54b7b1c0f (diff)
sched: window-stats: Code cleanup
add_task_demand() and the 'long_sleep' calculation in it are not strictly
required. rq_freq_margin() checks for the need to change frequency, which
removes the need for the long_sleep calculation. Once that is removed, the
need for add_task_demand() vanishes.

Change-Id: I936540c06072eb8238fc18754aba88789ee3c9f5
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict in core.c]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
-rw-r--r--	kernel/sched/core.c	80
1 file changed, 32 insertions(+), 48 deletions(-)
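
For readers skimming the patch, here is a minimal, self-contained sketch (not
kernel code) of the window-contribution bookkeeping that survives the cleanup:
with the add_task_demand() gate gone, the two flag checks run unconditionally,
and the flags themselves keep the accounting idempotent. The struct layouts,
flag values, and account_window_contrib() name below are stand-ins; only the
CURR_WINDOW_CONTRIB/PREV_WINDOW_CONTRIB logic mirrors the diff that follows.

#include <stdint.h>
#include <stdio.h>

#define CURR_WINDOW_CONTRIB (1 << 0)
#define PREV_WINDOW_CONTRIB (1 << 1)

struct ravg { uint32_t flags; uint64_t demand, partial_demand; };
struct task { struct ravg ravg; };
struct rq   { uint64_t curr_runnable_sum, prev_runnable_sum; };

/*
 * After the patch: no add_task_demand() gate, no long_sleep out-parameter.
 * Each flag is checked independently, so a task's partial_demand is always
 * represented in curr_runnable_sum and its demand in prev_runnable_sum.
 */
static void account_window_contrib(struct task *p, struct rq *rq)
{
	if (!(p->ravg.flags & CURR_WINDOW_CONTRIB)) {
		rq->curr_runnable_sum += p->ravg.partial_demand;
		p->ravg.flags |= CURR_WINDOW_CONTRIB;
	}
	if (!(p->ravg.flags & PREV_WINDOW_CONTRIB)) {
		rq->prev_runnable_sum += p->ravg.demand;
		p->ravg.flags |= PREV_WINDOW_CONTRIB;
	}
}

int main(void)
{
	struct task p = { .ravg = { .flags = 0, .demand = 100,
				    .partial_demand = 40 } };
	struct rq rq = { 0, 0 };

	account_window_contrib(&p, &rq);
	account_window_contrib(&p, &rq); /* idempotent: flags block a double count */
	printf("curr=%llu prev=%llu\n",
	       (unsigned long long)rq.curr_runnable_sum,
	       (unsigned long long)rq.prev_runnable_sum);
	return 0;
}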
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b84bf2f9b6ed..c8b0973b7f60 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1434,29 +1434,8 @@ static inline u64 scale_exec_time(u64 delta, struct rq *rq)
return delta;
}
-/*
- * We depend on task's partial_demand to be always represented in
- * rq->curr_runnable_sum and its demand to be represented in
- * rq->prev_runnable_sum. When task wakes up (TASK_WAKE) or is picked to run
- * (PICK_NEXT_TASK) or migrated (TASK_MIGRATE) with sched_account_wait_time ==
- * 0, ensure this dependency is met.
- */
-static inline int add_task_demand(int event, struct task_struct *p,
- struct rq *rq, int *long_sleep)
-{
- if ((p->ravg.flags & CURR_WINDOW_CONTRIB) &&
- (p->ravg.flags & PREV_WINDOW_CONTRIB))
- return 0;
-
- if (long_sleep && (rq->window_start > p->ravg.mark_start &&
- rq->window_start - p->ravg.mark_start > sched_ravg_window))
- *long_sleep = 1;
-
- return 1;
-}
-
static void update_task_ravg(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, int *long_sleep, u64 irqtime)
+ int event, u64 wallclock, u64 irqtime)
{
u32 window_size = sched_ravg_window;
int update_sum, new_window;
@@ -1571,16 +1550,22 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
mark_start = window_start;
} while (new_window);
- if (add_task_demand(event, p, rq, long_sleep)) {
- if (!(p->ravg.flags & CURR_WINDOW_CONTRIB)) {
- rq->curr_runnable_sum += p->ravg.partial_demand;
- p->ravg.flags |= CURR_WINDOW_CONTRIB;
- }
+ /*
+ * We depend on task's partial_demand to be always represented in
+ * rq->curr_runnable_sum and its demand to be represented in
+ * rq->prev_runnable_sum. When task wakes up (TASK_WAKE) or is picked to
+ * run (PICK_NEXT_TASK) or migrated (TASK_MIGRATE) with
+ * sched_account_wait_time == 0, ensure this dependency is met.
+ */
- if (!(p->ravg.flags & PREV_WINDOW_CONTRIB)) {
- rq->prev_runnable_sum += p->ravg.demand;
- p->ravg.flags |= PREV_WINDOW_CONTRIB;
- }
+ if (!(p->ravg.flags & CURR_WINDOW_CONTRIB)) {
+ rq->curr_runnable_sum += p->ravg.partial_demand;
+ p->ravg.flags |= CURR_WINDOW_CONTRIB;
+ }
+
+ if (!(p->ravg.flags & PREV_WINDOW_CONTRIB)) {
+ rq->prev_runnable_sum += p->ravg.demand;
+ p->ravg.flags |= PREV_WINDOW_CONTRIB;
}
done:
@@ -1599,7 +1584,7 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
return;
raw_spin_lock_irqsave(&rq->lock, flags);
- update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, NULL, delta);
+ update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, delta);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -1644,7 +1629,7 @@ static inline void mark_task_starting(struct task_struct *p)
return;
}
- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
p->ravg.mark_start = wallclock;
rq->prev_runnable_sum += p->ravg.demand;
rq->curr_runnable_sum += p->ravg.partial_demand;
@@ -1697,7 +1682,7 @@ unsigned long sched_get_busy(int cpu)
* that the window stats are current by doing an update.
*/
raw_spin_lock_irqsave(&rq->lock, flags);
- update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
raw_spin_unlock_irqrestore(&rq->lock, flags);
return div64_u64(scale_load_to_cpu(rq->prev_runnable_sum, cpu),
@@ -1960,7 +1945,7 @@ static int cpufreq_notifier_trans(struct notifier_block *nb,
BUG_ON(!new_freq);
raw_spin_lock_irqsave(&rq->lock, flags);
- update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
cpu_rq(cpu)->cur_freq = new_freq;
raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -2013,9 +1998,9 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
update_task_ravg(task_rq(p)->curr, task_rq(p),
TASK_UPDATE,
- wallclock, NULL, 0);
+ wallclock, 0);
update_task_ravg(dest_rq->curr, dest_rq,
- TASK_UPDATE, wallclock, NULL, 0);
+ TASK_UPDATE, wallclock, 0);
/*
* In case of migration of task on runqueue, on_rq =1,
@@ -2032,7 +2017,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
}
update_task_ravg(p, task_rq(p), TASK_MIGRATE,
- wallclock, NULL, 0);
+ wallclock, 0);
/*
* Remove task's load from rq as its now migrating to
@@ -2087,7 +2072,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
static inline void
update_task_ravg(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, int *long_sleep, u64 irqtime)
+ int event, u64 wallclock, u64 irqtime)
{
}
@@ -2994,6 +2979,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
}
__read_mostly unsigned int sysctl_sched_wakeup_load_threshold = 110;
+
/**
* try_to_wake_up - wake up a thread
* @p: the thread to be awakened
@@ -3016,7 +3002,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
int cpu, src_cpu, success = 0;
#ifdef CONFIG_SMP
struct rq *rq;
- int long_sleep = 0;
u64 wallclock;
#endif
@@ -3081,8 +3066,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
raw_spin_lock(&rq->lock);
wallclock = sched_clock();
- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
- update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep, 0);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
raw_spin_unlock(&rq->lock);
p->sched_contributes_to_load = !!task_contributes_to_load(p);
@@ -3149,7 +3134,6 @@ out:
static void try_to_wake_up_local(struct task_struct *p)
{
struct rq *rq = task_rq(p);
- int long_sleep = 0;
if (rq != this_rq() || p == current) {
printk_deferred("%s: Failed to wakeup task %d (%s), rq = %p,"
@@ -3183,8 +3167,8 @@ static void try_to_wake_up_local(struct task_struct *p)
if (!task_on_rq_queued(p)) {
u64 wallclock = sched_clock();
- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
- update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep, 0);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
}
@@ -4030,7 +4014,7 @@ void scheduler_tick(void)
curr->sched_class->task_tick(rq, curr, 0);
update_cpu_load_active(rq);
calc_global_load_tick(rq);
- update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
raw_spin_unlock(&rq->lock);
perf_event_task_tick();
@@ -4331,8 +4315,8 @@ static void __sched notrace __schedule(bool preempt)
next = pick_next_task(rq, prev);
wallclock = sched_clock();
- update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, NULL, 0);
- update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, NULL, 0);
+ update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+ update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
rq->clock_skip_update = 0;
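
As a closing note on why 'long_sleep' could go: the removed helper only flagged
tasks that had slept through more than a full averaging window, as a hint for
frequency re-evaluation. That test is reconstructed below from the removed
lines; the freq_change_needed() function and its 80% threshold are hypothetical
stand-ins for the rq_freq_margin() check the commit message cites, whose real
signature and policy are not shown in this patch. The point is only that a
direct load-versus-capacity margin makes the sleep-length hint unnecessary.

#include <stdint.h>
#include <stdio.h>

/* The long-sleep test removed by this patch (reconstructed from the diff):
 * true when a task slept through more than one full averaging window. */
static int is_long_sleep(uint64_t window_start, uint64_t mark_start,
			 uint64_t sched_ravg_window)
{
	return window_start > mark_start &&
	       window_start - mark_start > sched_ravg_window;
}

/* Hypothetical stand-in for a margin check like rq_freq_margin(): decide on
 * a frequency change directly from current window load versus capacity. The
 * threshold (load within 20% of capacity) is an illustrative assumption. */
static int freq_change_needed(uint64_t runnable_sum, uint64_t capacity)
{
	return runnable_sum * 5 >= capacity * 4;
}

int main(void)
{
	/* A task that slept for over two windows: the old path flagged it... */
	printf("long_sleep=%d\n", is_long_sleep(3000, 500, 1000));
	/* ...but the frequency decision can be made from load alone. */
	printf("freq_change=%d\n", freq_change_needed(850, 1000));
	return 0;
}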