diff options
| author | Steve Muckle <smuckle@codeaurora.org> | 2014-10-14 13:51:34 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:01:02 -0700 |
| commit | 2365b0cbd64fe7a00ec2cfd3b7d8a20df640e095 (patch) | |
| tree | f7fb0615390ecc425f018fd6e0f186286c8690f8 /kernel/sched | |
| parent | 59512f4e49ab5723faec8d3404a704c163e8b744 (diff) | |
sched: tighten up jiffy to sched_clock mapping
The tick code already tracks the exact time at which a tick is
expected to arrive. This can be used to eliminate slack in the
jiffy to sched_clock mapping that aligns windows between a caller
of sched_set_window and the scheduler itself.
Change-Id: I9d47466658d01e6857d7457405459436d504a2ca
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict in include/linux/tick.h]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched')
| -rw-r--r-- | kernel/sched/core.c | 30 |
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 07aac49174dd..a7324abaeb3f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1243,8 +1243,6 @@
 __read_mostly unsigned int sched_ravg_window = 10000000;
 unsigned int __read_mostly sched_disable_window_stats;
 static unsigned int sync_cpu;
-static u64 sched_init_jiffy;
-static u64 sched_clock_at_init_jiffy;
 
 #define EXITING_TASK_MARKER 0xdeaddead
 
@@ -1916,23 +1914,16 @@ static inline void mark_task_starting(struct task_struct *p)
 	p->ravg.mark_start = wallclock;
 }
 
-static int update_alignment;
-
 static inline void set_window_start(struct rq *rq)
 {
 	int cpu = cpu_of(rq);
 	struct rq *sync_rq = cpu_rq(sync_cpu);
 
-	if (cpu == sync_cpu && !update_alignment) {
-		sched_init_jiffy = get_jiffies_64();
-		sched_clock_at_init_jiffy = sched_clock();
-	}
-
 	if (rq->window_start || !sched_enable_hmp)
 		return;
 
 	if (cpu == sync_cpu) {
-		rq->window_start = sched_clock_at_init_jiffy;
+		rq->window_start = sched_clock();
 	} else {
 		raw_spin_unlock(&rq->lock);
 		double_rq_lock(rq, sync_rq);
@@ -2165,20 +2156,29 @@ void sched_set_io_is_busy(int val)
 
 int sched_set_window(u64 window_start, unsigned int window_size)
 {
-	u64 ws, now;
+	u64 now, cur_jiffies, jiffy_sched_clock;
+	s64 ws;
+	unsigned long flags;
 
 	if (sched_use_pelt ||
 		 (window_size * TICK_NSEC <  MIN_SCHED_RAVG_WINDOW))
 			return -EINVAL;
 
 	mutex_lock(&policy_mutex);
-	update_alignment = 1;
-	ws = (window_start - sched_init_jiffy); /* jiffy difference */
+
+	/*
+	 * Get a consistent view of sched_clock, jiffies, and the time
+	 * since the last jiffy (based on last_jiffies_update).
+	 */
+	local_irq_save(flags);
+	cur_jiffies = jiffy_to_sched_clock(&now, &jiffy_sched_clock);
+	local_irq_restore(flags);
+
+	/* translate window_start from jiffies to nanoseconds */
+	ws = (window_start - cur_jiffies); /* jiffy difference */
 	ws *= TICK_NSEC;
-	ws += sched_clock_at_init_jiffy;
+	ws += jiffy_sched_clock;
 
-	now = sched_clock();
+	/*
+	 * Roll back the calculated window start so that it is in
+	 * the past (window stats must have a current window).
+	 */
 	while (ws > now)
 		ws -= (window_size * TICK_NSEC);
