Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r-- | kernel/time/tick-sched.c | 98
1 file changed, 94 insertions, 4 deletions
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a935cbdc55a4..6579be96e041 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -19,11 +19,13 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+#include <linux/timer.h>
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
 #include <linux/perf_event.h>
 #include <linux/context_tracking.h>
+#include <linux/rq_stats.h>
 
 #include <asm/irq_regs.h>
 
@@ -31,6 +33,10 @@
 
 #include <trace/events/timer.h>
 
+struct rq_data rq_info;
+struct workqueue_struct *rq_wq;
+spinlock_t rq_lock;
+
 /*
  * Per cpu nohz control structure
  */
@@ -41,6 +47,21 @@ static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
  */
 static ktime_t last_jiffies_update;
 
+u64 jiffy_to_ktime_ns(u64 *now, u64 *jiffy_ktime_ns)
+{
+	u64 cur_jiffies;
+	unsigned long seq;
+
+	do {
+		seq = read_seqbegin(&jiffies_lock);
+		*now = ktime_get_ns();
+		*jiffy_ktime_ns = ktime_to_ns(last_jiffies_update);
+		cur_jiffies = get_jiffies_64();
+	} while (read_seqretry(&jiffies_lock, seq));
+
+	return cur_jiffies;
+}
+
 struct tick_sched *tick_get_tick_sched(int cpu)
 {
 	return &per_cpu(tick_cpu_sched, cpu);
@@ -143,7 +164,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 	 * when we go busy again does not account too much ticks.
 	 */
 	if (ts->tick_stopped) {
-		touch_softlockup_watchdog();
+		touch_softlockup_watchdog_sched();
 		if (is_idle_task(current))
 			ts->idle_jiffies++;
 	}
@@ -430,7 +451,7 @@
 	tick_do_update_jiffies64(now);
 	local_irq_restore(flags);
 
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sched();
 }
 
 /*
@@ -716,7 +737,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	update_cpu_load_nohz();
 
 	calc_load_exit_idle();
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sched();
 	/*
 	 * Cancel the scheduled timer and restore the tick
 	 */
@@ -804,6 +825,11 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 
 	now = tick_nohz_start_idle(ts);
 
+#ifdef CONFIG_SMP
+	if (check_pending_deferrable_timers(cpu))
+		raise_softirq_irqoff(TIMER_SOFTIRQ);
+#endif
+
 	if (can_stop_idle_tick(cpu, ts)) {
 		int was_stopped = ts->tick_stopped;
 
@@ -1076,6 +1102,51 @@ void tick_irq_enter(void)
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
+static void update_rq_stats(void)
+{
+	unsigned long jiffy_gap = 0;
+	unsigned int rq_avg = 0;
+	unsigned long flags = 0;
+
+	jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
+
+	if (jiffy_gap >= rq_info.rq_poll_jiffies) {
+
+		spin_lock_irqsave(&rq_lock, flags);
+
+		if (!rq_info.rq_avg)
+			rq_info.rq_poll_total_jiffies = 0;
+
+		rq_avg = nr_running() * 10;
+
+		if (rq_info.rq_poll_total_jiffies) {
+			rq_avg = (rq_avg * jiffy_gap) +
+				(rq_info.rq_avg *
+				 rq_info.rq_poll_total_jiffies);
+			do_div(rq_avg,
+				rq_info.rq_poll_total_jiffies + jiffy_gap);
+		}
+
+		rq_info.rq_avg = rq_avg;
+		rq_info.rq_poll_total_jiffies += jiffy_gap;
+		rq_info.rq_poll_last_jiffy = jiffies;
+
+		spin_unlock_irqrestore(&rq_lock, flags);
+	}
+}
+
+static void wakeup_user(void)
+{
+	unsigned long jiffy_gap;
+
+	jiffy_gap = jiffies - rq_info.def_timer_last_jiffy;
+
+	if (jiffy_gap >= rq_info.def_timer_jiffies) {
+		rq_info.def_timer_last_jiffy = jiffies;
+		queue_work(rq_wq, &rq_info.def_timer_work);
+	}
+}
+
 /*
  * We rearm the timer until we get disabled by the idle code.
  * Called with interrupts disabled.
@@ -1093,9 +1164,23 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs)
+	if (regs) {
 		tick_sched_handle(ts, regs);
 
+		if (rq_info.init == 1 &&
+				tick_do_timer_cpu == smp_processor_id()) {
+			/*
+			 * update run queue statistics
+			 */
+			update_rq_stats();
+
+			/*
+			 * wakeup user if needed
+			 */
+			wakeup_user();
+		}
+	}
+
 	/* No need to reprogram if we are in idle or full dynticks mode */
 	if (unlikely(ts->tick_stopped))
 		return HRTIMER_NORESTART;
@@ -1208,3 +1293,8 @@ int tick_check_oneshot_change(int allow_nohz)
 	tick_nohz_switch_to_nohz();
 	return 0;
 }
+
+ktime_t *get_next_event_cpu(unsigned int cpu)
+{
+	return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event);
+}
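
The jiffy_to_ktime_ns() helper added above snapshots jiffies_64 and last_jiffies_update inside a jiffies_lock seqlock read section, retrying whenever a writer updated the values mid-read, so the caller gets a consistent (jiffies, ktime) pair. Below is a minimal runnable userspace analogue of that retry pattern using C11 atomics; every name in it is illustrative, none of it is kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for jiffies_64 and last_jiffies_update, guarded by a
 * sequence counter: even = stable, odd = write in progress. */
struct jiffies_shadow {
	atomic_uint seq;
	unsigned long long jiffies_64;
	long long last_update_ns;
};

static unsigned int demo_read_begin(struct jiffies_shadow *s)
{
	unsigned int seq;

	/* spin until no write is in progress, like read_seqbegin() */
	do {
		seq = atomic_load_explicit(&s->seq, memory_order_acquire);
	} while (seq & 1);
	return seq;
}

static int demo_read_retry(struct jiffies_shadow *s, unsigned int seq)
{
	/* order the data reads before re-checking the sequence */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&s->seq, memory_order_relaxed) != seq;
}

int main(void)
{
	struct jiffies_shadow s = { .jiffies_64 = 100000, .last_update_ns = 5000000 };
	unsigned long long cur_jiffies;
	long long jiffy_ns;
	unsigned int seq;

	atomic_init(&s.seq, 0);

	/* same shape as the do/while loop in jiffy_to_ktime_ns() */
	do {
		seq = demo_read_begin(&s);
		jiffy_ns = s.last_update_ns;
		cur_jiffies = s.jiffies_64;
	} while (demo_read_retry(&s, seq));

	printf("jiffies=%llu last_update_ns=%lld\n", cur_jiffies, jiffy_ns);
	return 0;
}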
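update_rq_stats() maintains a time-weighted running average of run-queue depth: each poll samples nr_running() * 10 (tenths of a runnable task) and folds it into the previous average, with each term weighted by the jiffies it covers, before do_div() performs the division. A standalone sketch of the same arithmetic, with illustrative names:

#include <stdio.h>

/* Weighted average: the new sample counts for jiffy_gap jiffies, the
 * old average for the total_jiffies already accumulated. */
static unsigned int rq_avg_update(unsigned int old_avg,
				  unsigned long total_jiffies,
				  unsigned int sample,
				  unsigned long jiffy_gap)
{
	unsigned long long num;

	if (!total_jiffies)
		return sample;

	num = (unsigned long long)sample * jiffy_gap +
	      (unsigned long long)old_avg * total_jiffies;
	return (unsigned int)(num / (total_jiffies + jiffy_gap));
}

int main(void)
{
	/* first poll: 3 runnable tasks -> sample 30, after 10 jiffies */
	unsigned int avg = rq_avg_update(0, 0, 30, 10);

	/* second poll: 1 runnable task -> sample 10, 10 jiffies later */
	avg = rq_avg_update(avg, 10, 10, 10);

	printf("rq_avg = %u tenths of a task\n", avg); /* prints 20 */
	return 0;
}

Mechanically, the kernel version also clears rq_poll_total_jiffies whenever rq_info.rq_avg reads zero, so the next sample starts a fresh window instead of being blended with history.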
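get_next_event_cpu(), added at the end of the file, returns a pointer to a CPU's tick-device next_event expiry. A hypothetical caller (not part of this patch) could use it to ask how far away that CPU's next programmed event is; ktime_get(), ktime_sub() and ktime_to_ns() are existing kernel helpers, while the wrapper below is only an assumption about intended use:

/* Hypothetical consumer, e.g. in an idle or power governor: */
static s64 ns_until_next_event(unsigned int cpu)
{
	ktime_t *next = get_next_event_cpu(cpu);

	/* nanoseconds from now until that CPU's next clock event */
	return ktime_to_ns(ktime_sub(*next, ktime_get()));
}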