author    Steve Muckle <smuckle@codeaurora.org>    2014-10-14 13:51:34 -0700
committer David Keitel <dkeitel@codeaurora.org>    2016-03-23 20:01:02 -0700
commit    2365b0cbd64fe7a00ec2cfd3b7d8a20df640e095
tree      f7fb0615390ecc425f018fd6e0f186286c8690f8 /kernel
parent    59512f4e49ab5723faec8d3404a704c163e8b744
sched: tighten up jiffy to sched_clock mapping
The tick code already tracks the exact time a tick is expected to
arrive. This can be used to eliminate slack in the jiffy to
sched_clock mapping that aligns windows between a caller of
sched_set_window and the scheduler itself.

Change-Id: I9d47466658d01e6857d7457405459436d504a2ca
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict in include/linux/tick.h]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
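In outline, the new mapping works as sketched below. This is a simplified
restatement of the sched_set_window() hunk in this diff, not the literal
kernel code; jiffy_to_sched_clock() is the helper the patch adds to
kernel/time/tick-sched.c, and window_start_to_ns() is a made-up name used
only for illustration:

/*
 * Simplified sketch of the conversion performed in sched_set_window()
 * below. jiffy_sched_clock approximates the sched_clock() value at the
 * most recent jiffy, so a window start given in jiffies can be
 * translated to nanoseconds without the slack of an anchor captured
 * once at init time.
 */
static s64 window_start_to_ns(u64 window_start, u64 cur_jiffies,
			      u64 jiffy_sched_clock, u64 now,
			      unsigned int window_size)
{
	/* jiffy difference, then scale to nanoseconds */
	s64 ws = (s64)(window_start - cur_jiffies) * TICK_NSEC;

	/* anchor at the sched_clock value of the last jiffy */
	ws += jiffy_sched_clock;

	/* roll back so the window start lies in the past */
	while (ws > now)
		ws -= (s64)window_size * TICK_NSEC;

	return ws;
}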
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c      | 30
-rw-r--r--  kernel/time/tick-sched.c | 32
2 files changed, 47 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 07aac49174dd..a7324abaeb3f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1243,8 +1243,6 @@ __read_mostly unsigned int sched_ravg_window = 10000000;
unsigned int __read_mostly sched_disable_window_stats;
static unsigned int sync_cpu;
-static u64 sched_init_jiffy;
-static u64 sched_clock_at_init_jiffy;
#define EXITING_TASK_MARKER 0xdeaddead
@@ -1916,23 +1914,16 @@ static inline void mark_task_starting(struct task_struct *p)
p->ravg.mark_start = wallclock;
}
-static int update_alignment;
-
static inline void set_window_start(struct rq *rq)
{
int cpu = cpu_of(rq);
struct rq *sync_rq = cpu_rq(sync_cpu);
- if (cpu == sync_cpu && !update_alignment) {
- sched_init_jiffy = get_jiffies_64();
- sched_clock_at_init_jiffy = sched_clock();
- }
-
if (rq->window_start || !sched_enable_hmp)
return;
if (cpu == sync_cpu) {
- rq->window_start = sched_clock_at_init_jiffy;
+ rq->window_start = sched_clock();
} else {
raw_spin_unlock(&rq->lock);
double_rq_lock(rq, sync_rq);
@@ -2165,20 +2156,29 @@ void sched_set_io_is_busy(int val)
int sched_set_window(u64 window_start, unsigned int window_size)
{
- u64 ws, now;
+ u64 now, cur_jiffies, jiffy_sched_clock;
+ s64 ws;
+ unsigned long flags;
if (sched_use_pelt ||
(window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW))
return -EINVAL;
mutex_lock(&policy_mutex);
- update_alignment = 1;
- ws = (window_start - sched_init_jiffy); /* jiffy difference */
+ /* Get a consistent view of sched_clock, jiffies, and the time
+ * since the last jiffy (based on last_jiffies_update). */
+ local_irq_save(flags);
+ cur_jiffies = jiffy_to_sched_clock(&now, &jiffy_sched_clock);
+ local_irq_restore(flags);
+
+ /* translate window_start from jiffies to nanoseconds */
+ ws = (window_start - cur_jiffies); /* jiffy difference */
ws *= TICK_NSEC;
- ws += sched_clock_at_init_jiffy;
+ ws += jiffy_sched_clock;
- now = sched_clock();
+ /* roll back calculated window start so that it is in
+ * the past (window stats must have a current window) */
while (ws > now)
ws -= (window_size * TICK_NSEC);
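
For intuition, a worked example of the rollback loop above, with
hypothetical numbers (assuming HZ = 100, so TICK_NSEC = 10,000,000 ns):

/*
 * Hypothetical numbers, HZ = 100 (TICK_NSEC = 10,000,000 ns):
 *
 *   cur_jiffies       = 1000
 *   window_start      = 1002            (2 jiffies ahead)
 *   jiffy_sched_clock = 5,000,000,000 ns
 *   now               = 5,004,000,000 ns
 *   window_size       = 1 jiffy
 *
 *   ws = (1002 - 1000) * 10,000,000 + 5,000,000,000 = 5,020,000,000
 *
 * ws > now, so the loop subtracts one window at a time:
 * 5,020,000,000 -> 5,010,000,000 -> 5,000,000,000 <= now, done.
 */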
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b0741801c4c7..c1f3aca1a01d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -46,6 +46,38 @@ static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
*/
static ktime_t last_jiffies_update;
+/*
+ * Conversion from ktime to sched_clock is error prone. Use this
+ * as a safety margin when calculating the sched_clock value at
+ * a particular jiffy as last_jiffies_update uses ktime.
+ */
+#define SCHED_CLOCK_MARGIN 100000
+
+static u64 ns_since_jiffy(void)
+{
+ ktime_t delta;
+
+ delta = ktime_sub(ktime_get(), last_jiffies_update);
+
+ return ktime_to_ns(delta);
+}
+
+u64 jiffy_to_sched_clock(u64 *now, u64 *jiffy_sched_clock)
+{
+ u64 cur_jiffies;
+ unsigned long seq;
+
+ do {
+ seq = read_seqbegin(&jiffies_lock);
+ *now = sched_clock();
+ *jiffy_sched_clock = *now -
+ (ns_since_jiffy() + SCHED_CLOCK_MARGIN);
+ cur_jiffies = get_jiffies_64();
+ } while (read_seqretry(&jiffies_lock, seq));
+
+ return cur_jiffies;
+}
+
struct tick_sched *tick_get_tick_sched(int cpu)
{
return &per_cpu(tick_cpu_sched, cpu);
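
A usage note on the new helper: jiffy_to_sched_clock() samples
sched_clock() and jiffies under the jiffies_lock seqlock, and the caller
is expected to disable interrupts around the call, as sched_set_window()
does above, so that a tick cannot advance jiffies between the reads. A
minimal hypothetical caller:

/* Hypothetical caller, mirroring the pattern in sched_set_window(). */
u64 now, jiffy_sched_clock, cur_jiffies;
unsigned long flags;

local_irq_save(flags);
cur_jiffies = jiffy_to_sched_clock(&now, &jiffy_sched_clock);
local_irq_restore(flags);

/*
 * now               - sched_clock() at the time of the call
 * jiffy_sched_clock - approximate sched_clock() at the last jiffy,
 *                     deliberately biased early by SCHED_CLOCK_MARGIN
 * cur_jiffies       - the 64-bit jiffy count for the same instant
 */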