summaryrefslogtreecommitdiff
path: root/kernel/time
diff options
context:
space:
mode:
authorSteve Muckle <smuckle@codeaurora.org>2014-10-14 13:51:34 -0700
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 20:01:02 -0700
commit2365b0cbd64fe7a00ec2cfd3b7d8a20df640e095 (patch)
treef7fb0615390ecc425f018fd6e0f186286c8690f8 /kernel/time
parent59512f4e49ab5723faec8d3404a704c163e8b744 (diff)
sched: tighten up jiffy to sched_clock mapping
The tick code already tracks exact time a tick is expected to arrive. This can be used to eliminate slack in the jiffy to sched_clock mapping that aligns windows between a caller of sched_set_window and the scheduler itself. Change-Id: I9d47466658d01e6857d7457405459436d504a2ca Signed-off-by: Steve Muckle <smuckle@codeaurora.org> [joonwoop@codeaurora.org: fixed minor conflict in include/linux/tick.h] Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/time')
-rw-r--r--kernel/time/tick-sched.c32
1 file changed, 32 insertions, 0 deletions
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b0741801c4c7..c1f3aca1a01d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -46,6 +46,38 @@ static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
*/
static ktime_t last_jiffies_update;
+/*
+ * Conversion from ktime to sched_clock is error prone. Use this
+ * as a safety margin, in nanoseconds, when calculating the
+ * sched_clock value at a particular jiffy, as last_jiffies_update
+ * uses ktime.
+ */
+#define SCHED_CLOCK_MARGIN 100000
+
+/*
+ * Return the number of nanoseconds elapsed since the last jiffies
+ * update, computed from ktime.
+ *
+ * NOTE(review): last_jiffies_update is guarded by jiffies_lock;
+ * callers are presumably expected to sample this inside that
+ * seqlock's read section (as jiffy_to_sched_clock below does) —
+ * confirm for any new caller.
+ */
+static u64 ns_since_jiffy(void)
+{
+ ktime_t delta;
+
+ delta = ktime_sub(ktime_get(), last_jiffies_update);
+
+ return ktime_to_ns(delta);
+}
+
+/*
+ * jiffy_to_sched_clock() - map the current jiffy to a sched_clock value.
+ * @now: out-param; sched_clock() sampled inside the seqlock read section.
+ * @jiffy_sched_clock: out-param; estimated sched_clock value at the most
+ *	recent jiffies update: *now minus the time elapsed since that
+ *	update, with SCHED_CLOCK_MARGIN subtracted as well to absorb
+ *	ktime -> sched_clock conversion error (so the estimate errs on
+ *	the early side).
+ *
+ * Returns the current 64-bit jiffies count. The read_seqbegin()/
+ * read_seqretry() loop on jiffies_lock retries the whole sample if a
+ * jiffies update raced with it, so *now, *jiffy_sched_clock and the
+ * returned jiffies value are mutually consistent.
+ */
+u64 jiffy_to_sched_clock(u64 *now, u64 *jiffy_sched_clock)
+{
+ u64 cur_jiffies;
+ unsigned long seq;
+
+ do {
+ seq = read_seqbegin(&jiffies_lock);
+ *now = sched_clock();
+ *jiffy_sched_clock = *now -
+ (ns_since_jiffy() + SCHED_CLOCK_MARGIN);
+ cur_jiffies = get_jiffies_64();
+ } while (read_seqretry(&jiffies_lock, seq));
+
+ return cur_jiffies;
+}
+
struct tick_sched *tick_get_tick_sched(int cpu)
{
return &per_cpu(tick_cpu_sched, cpu);