Diffstat (limited to 'drivers/clocksource')
25 files changed, 1782 insertions, 242 deletions
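Several of the memory-mapped timers added in this series (the mmio arch-timer frame with its CNTVCT_LO/CNTVCT_HI pair, and the ARM global timer with GT_COUNTER0/GT_COUNTER1) read a 64-bit free-running counter through two 32-bit registers using a high/low/high sequence that retries until the high word is stable. The following is a minimal sketch of that pattern; the register offsets and names are purely illustrative and do not correspond to any one driver's layout below:

#include <linux/io.h>
#include <linux/types.h>

#define CNT_LO	0x00	/* illustrative offsets, not a real device map */
#define CNT_HI	0x04

static u64 counter_read_64(void __iomem *base)
{
	u32 lo, hi, tmp_hi;

	do {
		hi = readl_relaxed(base + CNT_HI);
		lo = readl_relaxed(base + CNT_LO);
		tmp_hi = readl_relaxed(base + CNT_HI);
		/* if the high word changed, a carry happened mid-read: retry */
	} while (hi != tmp_hi);

	return ((u64)hi << 32) | lo;
}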
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index f151c6cf27c3..b7b9b040a89b 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -21,10 +21,17 @@ config DW_APB_TIMER config DW_APB_TIMER_OF bool + select DW_APB_TIMER + select CLKSRC_OF config ARMADA_370_XP_TIMER bool +config ORION_TIMER + select CLKSRC_OF + select CLKSRC_MMIO + bool + config SUN4I_TIMER bool @@ -67,6 +74,19 @@ config ARM_ARCH_TIMER bool select CLKSRC_OF if OF +config ARM_GLOBAL_TIMER + bool + select CLKSRC_OF if OF + help + This options enables support for the ARM global timer unit + +config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK + bool + depends on ARM_GLOBAL_TIMER + default y + help + Use ARM global timer clock source as sched_clock + config CLKSRC_METAG_GENERIC def_bool y if METAG help @@ -85,3 +105,8 @@ config CLKSRC_SAMSUNG_PWM Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver for all devicetree enabled platforms. This driver will be needed only on systems that do not have the Exynos MCT available. + +config VF_PIT_TIMER + bool + help + Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 8d979c72aa94..704d6d342adc 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -15,17 +15,23 @@ obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o +obj-$(CONFIG_ORION_TIMER) += time-orion.o obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o obj-$(CONFIG_ARCH_MARCO) += timer-marco.o +obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o obj-$(CONFIG_ARCH_MXS) += mxs_timer.o obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o +obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o +obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o +obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o +obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index a2b254189782..fbd9ccd5e114 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -16,13 +16,39 @@ #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/of_irq.h> +#include <linux/of_address.h> #include <linux/io.h> +#include <linux/slab.h> #include <asm/arch_timer.h> #include <asm/virt.h> #include <clocksource/arm_arch_timer.h> +#define CNTTIDR 0x08 +#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) + +#define CNTVCT_LO 0x08 +#define CNTVCT_HI 0x0c +#define CNTFRQ 0x10 +#define CNTP_TVAL 0x28 +#define CNTP_CTL 0x2c +#define CNTV_TVAL 0x38 +#define CNTV_CTL 0x3c + +#define ARCH_CP15_TIMER BIT(0) +#define ARCH_MEM_TIMER BIT(1) +static unsigned arch_timers_present __initdata; + +static void __iomem *arch_counter_base; + +struct arch_timer { + void __iomem *base; + struct clock_event_device evt; +}; + +#define to_arch_timer(e) container_of(e, struct arch_timer, evt) + static u32 arch_timer_rate; enum ppi_nr { @@ -38,19 
+64,83 @@ static int arch_timer_ppi[MAX_TIMER_PPI]; static struct clock_event_device __percpu *arch_timer_evt; static bool arch_timer_use_virtual = true; +static bool arch_timer_mem_use_virtual; /* * Architected system timer support. */ -static inline irqreturn_t timer_handler(const int access, +static __always_inline +void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, + struct clock_event_device *clk) +{ + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { + struct arch_timer *timer = to_arch_timer(clk); + switch (reg) { + case ARCH_TIMER_REG_CTRL: + writel_relaxed(val, timer->base + CNTP_CTL); + break; + case ARCH_TIMER_REG_TVAL: + writel_relaxed(val, timer->base + CNTP_TVAL); + break; + } + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { + struct arch_timer *timer = to_arch_timer(clk); + switch (reg) { + case ARCH_TIMER_REG_CTRL: + writel_relaxed(val, timer->base + CNTV_CTL); + break; + case ARCH_TIMER_REG_TVAL: + writel_relaxed(val, timer->base + CNTV_TVAL); + break; + } + } else { + arch_timer_reg_write_cp15(access, reg, val); + } +} + +static __always_inline +u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, + struct clock_event_device *clk) +{ + u32 val; + + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { + struct arch_timer *timer = to_arch_timer(clk); + switch (reg) { + case ARCH_TIMER_REG_CTRL: + val = readl_relaxed(timer->base + CNTP_CTL); + break; + case ARCH_TIMER_REG_TVAL: + val = readl_relaxed(timer->base + CNTP_TVAL); + break; + } + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { + struct arch_timer *timer = to_arch_timer(clk); + switch (reg) { + case ARCH_TIMER_REG_CTRL: + val = readl_relaxed(timer->base + CNTV_CTL); + break; + case ARCH_TIMER_REG_TVAL: + val = readl_relaxed(timer->base + CNTV_TVAL); + break; + } + } else { + val = arch_timer_reg_read_cp15(access, reg); + } + + return val; +} + +static __always_inline irqreturn_t timer_handler(const int access, struct clock_event_device *evt) { unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); + + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt); if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { ctrl |= ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt); evt->event_handler(evt); return IRQ_HANDLED; } @@ -72,15 +162,30 @@ static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); } -static inline void timer_set_mode(const int access, int mode) +static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt); +} + +static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt); +} + +static __always_inline void timer_set_mode(const int access, int mode, + struct clock_event_device *clk) { unsigned long ctrl; switch (mode) { case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); ctrl &= ~ARCH_TIMER_CTRL_ENABLE; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); break; default: break; @@ -90,60 +195,108 @@ static inline void timer_set_mode(const int access, int mode) static void arch_timer_set_mode_virt(enum 
clock_event_mode mode, struct clock_event_device *clk) { - timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode); + timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk); } static void arch_timer_set_mode_phys(enum clock_event_mode mode, struct clock_event_device *clk) { - timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode); + timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk); +} + +static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode, + struct clock_event_device *clk) +{ + timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk); } -static inline void set_next_event(const int access, unsigned long evt) +static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode, + struct clock_event_device *clk) +{ + timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk); +} + +static __always_inline void set_next_event(const int access, unsigned long evt, + struct clock_event_device *clk) { unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); ctrl |= ARCH_TIMER_CTRL_ENABLE; ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt); - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); + arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk); + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); } static int arch_timer_set_next_event_virt(unsigned long evt, - struct clock_event_device *unused) + struct clock_event_device *clk) { - set_next_event(ARCH_TIMER_VIRT_ACCESS, evt); + set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); return 0; } static int arch_timer_set_next_event_phys(unsigned long evt, - struct clock_event_device *unused) + struct clock_event_device *clk) { - set_next_event(ARCH_TIMER_PHYS_ACCESS, evt); + set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); return 0; } -static int __cpuinit arch_timer_setup(struct clock_event_device *clk) +static int arch_timer_set_next_event_virt_mem(unsigned long evt, + struct clock_event_device *clk) { - clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; - clk->name = "arch_sys_timer"; - clk->rating = 450; - if (arch_timer_use_virtual) { - clk->irq = arch_timer_ppi[VIRT_PPI]; - clk->set_mode = arch_timer_set_mode_virt; - clk->set_next_event = arch_timer_set_next_event_virt; + set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk); + return 0; +} + +static int arch_timer_set_next_event_phys_mem(unsigned long evt, + struct clock_event_device *clk) +{ + set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk); + return 0; +} + +static void __arch_timer_setup(unsigned type, + struct clock_event_device *clk) +{ + clk->features = CLOCK_EVT_FEAT_ONESHOT; + + if (type == ARCH_CP15_TIMER) { + clk->features |= CLOCK_EVT_FEAT_C3STOP; + clk->name = "arch_sys_timer"; + clk->rating = 450; + clk->cpumask = cpumask_of(smp_processor_id()); + if (arch_timer_use_virtual) { + clk->irq = arch_timer_ppi[VIRT_PPI]; + clk->set_mode = arch_timer_set_mode_virt; + clk->set_next_event = arch_timer_set_next_event_virt; + } else { + clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; + clk->set_mode = arch_timer_set_mode_phys; + clk->set_next_event = arch_timer_set_next_event_phys; + } } else { - clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; - clk->set_mode = arch_timer_set_mode_phys; - clk->set_next_event = arch_timer_set_next_event_phys; + clk->name = "arch_mem_timer"; + clk->rating = 400; + clk->cpumask = cpu_all_mask; + if (arch_timer_mem_use_virtual) { + clk->set_mode = arch_timer_set_mode_virt_mem; + clk->set_next_event = + 
arch_timer_set_next_event_virt_mem; + } else { + clk->set_mode = arch_timer_set_mode_phys_mem; + clk->set_next_event = + arch_timer_set_next_event_phys_mem; + } } - clk->cpumask = cpumask_of(smp_processor_id()); + clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk); - clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); + clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff); +} - clockevents_config_and_register(clk, arch_timer_rate, - 0xf, 0x7fffffff); +static int arch_timer_setup(struct clock_event_device *clk) +{ + __arch_timer_setup(ARCH_CP15_TIMER, clk); if (arch_timer_use_virtual) enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); @@ -158,27 +311,41 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk) return 0; } -static int arch_timer_available(void) +static void +arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np) { - u32 freq; - - if (arch_timer_rate == 0) { - freq = arch_timer_get_cntfrq(); - - /* Check the timer frequency. */ - if (freq == 0) { - pr_warn("Architected timer frequency not available\n"); - return -EINVAL; - } + /* Who has more than one independent system counter? */ + if (arch_timer_rate) + return; - arch_timer_rate = freq; + /* Try to determine the frequency from the device tree or CNTFRQ */ + if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) { + if (cntbase) + arch_timer_rate = readl_relaxed(cntbase + CNTFRQ); + else + arch_timer_rate = arch_timer_get_cntfrq(); } - pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n", + /* Check the timer frequency. */ + if (arch_timer_rate == 0) + pr_warn("Architected timer frequency not available\n"); +} + +static void arch_timer_banner(unsigned type) +{ + pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", + type & ARCH_CP15_TIMER ? "cp15" : "", + type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "", + type & ARCH_MEM_TIMER ? "mmio" : "", (unsigned long)arch_timer_rate / 1000000, (unsigned long)(arch_timer_rate / 10000) % 100, - arch_timer_use_virtual ? "virt" : "phys"); - return 0; + type & ARCH_CP15_TIMER ? + arch_timer_use_virtual ? "virt" : "phys" : + "", + type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "", + type & ARCH_MEM_TIMER ? + arch_timer_mem_use_virtual ? "virt" : "phys" : + ""); } u32 arch_timer_get_rate(void) @@ -186,18 +353,26 @@ u32 arch_timer_get_rate(void) return arch_timer_rate; } -/* - * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to - * call it before it has been initialised. Rather than incur a performance - * penalty checking for initialisation, provide a default implementation that - * won't lead to time appearing to jump backwards. - */ -static u64 arch_timer_read_zero(void) +static u64 arch_counter_get_cntvct_mem(void) { - return 0; + u32 vct_lo, vct_hi, tmp_hi; + + do { + vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); + vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO); + tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); + } while (vct_hi != tmp_hi); + + return ((u64) vct_hi << 32) | vct_lo; } -u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero; +/* + * Default to cp15 based access because arm64 uses this function for + * sched_clock() before DT is probed and the cp15 method is guaranteed + * to exist on arm64. arm doesn't use this before DT is probed so even + * if we don't have the cp15 accessors we won't have a problem. 
+ */ +u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; static cycle_t arch_counter_read(struct clocksource *cs) { @@ -229,7 +404,24 @@ struct timecounter *arch_timer_get_timecounter(void) return &timecounter; } -static void __cpuinit arch_timer_stop(struct clock_event_device *clk) +static void __init arch_counter_register(unsigned type) +{ + u64 start_count; + + /* Register the CP15 based counter if we have one */ + if (type & ARCH_CP15_TIMER) + arch_timer_read_counter = arch_counter_get_cntvct; + else + arch_timer_read_counter = arch_counter_get_cntvct_mem; + + start_count = arch_timer_read_counter(); + clocksource_register_hz(&clocksource_counter, arch_timer_rate); + cyclecounter.mult = clocksource_counter.mult; + cyclecounter.shift = clocksource_counter.shift; + timecounter_init(&timecounter, &cyclecounter, start_count); +} + +static void arch_timer_stop(struct clock_event_device *clk) { pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id()); @@ -245,7 +437,7 @@ static void __cpuinit arch_timer_stop(struct clock_event_device *clk) clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); } -static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, +static int arch_timer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { /* @@ -264,7 +456,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block arch_timer_cpu_nb __cpuinitdata = { +static struct notifier_block arch_timer_cpu_nb = { .notifier_call = arch_timer_cpu_notify, }; @@ -273,22 +465,12 @@ static int __init arch_timer_register(void) int err; int ppi; - err = arch_timer_available(); - if (err) - goto out; - arch_timer_evt = alloc_percpu(struct clock_event_device); if (!arch_timer_evt) { err = -ENOMEM; goto out; } - clocksource_register_hz(&clocksource_counter, arch_timer_rate); - cyclecounter.mult = clocksource_counter.mult; - cyclecounter.shift = clocksource_counter.shift; - timecounter_init(&timecounter, &cyclecounter, - arch_counter_get_cntpct()); - if (arch_timer_use_virtual) { ppi = arch_timer_ppi[VIRT_PPI]; err = request_percpu_irq(ppi, arch_timer_handler_virt, @@ -339,24 +521,77 @@ out: return err; } +static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq) +{ + int ret; + irq_handler_t func; + struct arch_timer *t; + + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (!t) + return -ENOMEM; + + t->base = base; + t->evt.irq = irq; + __arch_timer_setup(ARCH_MEM_TIMER, &t->evt); + + if (arch_timer_mem_use_virtual) + func = arch_timer_handler_virt_mem; + else + func = arch_timer_handler_phys_mem; + + ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt); + if (ret) { + pr_err("arch_timer: Failed to request mem timer irq\n"); + kfree(t); + } + + return ret; +} + +static const struct of_device_id arch_timer_of_match[] __initconst = { + { .compatible = "arm,armv7-timer", }, + { .compatible = "arm,armv8-timer", }, + {}, +}; + +static const struct of_device_id arch_timer_mem_of_match[] __initconst = { + { .compatible = "arm,armv7-timer-mem", }, + {}, +}; + +static void __init arch_timer_common_init(void) +{ + unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER; + + /* Wait until both nodes are probed if we have two timers */ + if ((arch_timers_present & mask) != mask) { + if (of_find_matching_node(NULL, arch_timer_mem_of_match) && + !(arch_timers_present & ARCH_MEM_TIMER)) + return; + if (of_find_matching_node(NULL, arch_timer_of_match) && + !(arch_timers_present & 
ARCH_CP15_TIMER)) + return; + } + + arch_timer_banner(arch_timers_present); + arch_counter_register(arch_timers_present); + arch_timer_arch_init(); +} + static void __init arch_timer_init(struct device_node *np) { - u32 freq; int i; - if (arch_timer_get_rate()) { + if (arch_timers_present & ARCH_CP15_TIMER) { pr_warn("arch_timer: multiple nodes in dt, skipping\n"); return; } - /* Try to determine the frequency from the device tree or CNTFRQ */ - if (!of_property_read_u32(np, "clock-frequency", &freq)) - arch_timer_rate = freq; - + arch_timers_present |= ARCH_CP15_TIMER; for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) arch_timer_ppi[i] = irq_of_parse_and_map(np, i); - - of_node_put(np); + arch_timer_detect_rate(NULL, np); /* * If HYP mode is available, we know that the physical timer @@ -376,13 +611,74 @@ static void __init arch_timer_init(struct device_node *np) } } - if (arch_timer_use_virtual) - arch_timer_read_counter = arch_counter_get_cntvct; - else - arch_timer_read_counter = arch_counter_get_cntpct; - arch_timer_register(); - arch_timer_arch_init(); + arch_timer_common_init(); } CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init); CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init); + +static void __init arch_timer_mem_init(struct device_node *np) +{ + struct device_node *frame, *best_frame = NULL; + void __iomem *cntctlbase, *base; + unsigned int irq; + u32 cnttidr; + + arch_timers_present |= ARCH_MEM_TIMER; + cntctlbase = of_iomap(np, 0); + if (!cntctlbase) { + pr_err("arch_timer: Can't find CNTCTLBase\n"); + return; + } + + cnttidr = readl_relaxed(cntctlbase + CNTTIDR); + iounmap(cntctlbase); + + /* + * Try to find a virtual capable frame. Otherwise fall back to a + * physical capable frame. + */ + for_each_available_child_of_node(np, frame) { + int n; + + if (of_property_read_u32(frame, "frame-number", &n)) { + pr_err("arch_timer: Missing frame-number\n"); + of_node_put(best_frame); + of_node_put(frame); + return; + } + + if (cnttidr & CNTTIDR_VIRT(n)) { + of_node_put(best_frame); + best_frame = frame; + arch_timer_mem_use_virtual = true; + break; + } + of_node_put(best_frame); + best_frame = of_node_get(frame); + } + + base = arch_counter_base = of_iomap(best_frame, 0); + if (!base) { + pr_err("arch_timer: Can't map frame's registers\n"); + of_node_put(best_frame); + return; + } + + if (arch_timer_mem_use_virtual) + irq = irq_of_parse_and_map(best_frame, 1); + else + irq = irq_of_parse_and_map(best_frame, 0); + of_node_put(best_frame); + if (!irq) { + pr_err("arch_timer: Frame missing %s irq", + arch_timer_mem_use_virtual ? "virt" : "phys"); + return; + } + + arch_timer_detect_rate(base, np); + arch_timer_mem_register(base, irq); + arch_timer_common_init(); +} +CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", + arch_timer_mem_init); diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c new file mode 100644 index 000000000000..b66c1f36066c --- /dev/null +++ b/drivers/clocksource/arm_global_timer.c @@ -0,0 +1,321 @@ +/* + * drivers/clocksource/arm_global_timer.c + * + * Copyright (C) 2013 STMicroelectronics (R&D) Limited. + * Author: Stuart Menefy <stuart.menefy@st.com> + * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/sched_clock.h> + +#include <asm/cputype.h> + +#define GT_COUNTER0 0x00 +#define GT_COUNTER1 0x04 + +#define GT_CONTROL 0x08 +#define GT_CONTROL_TIMER_ENABLE BIT(0) /* this bit is NOT banked */ +#define GT_CONTROL_COMP_ENABLE BIT(1) /* banked */ +#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */ +#define GT_CONTROL_AUTO_INC BIT(3) /* banked */ + +#define GT_INT_STATUS 0x0c +#define GT_INT_STATUS_EVENT_FLAG BIT(0) + +#define GT_COMP0 0x10 +#define GT_COMP1 0x14 +#define GT_AUTO_INC 0x18 + +/* + * We are expecting to be clocked by the ARM peripheral clock. + * + * Note: it is assumed we are using a prescaler value of zero, so this is + * the units for all operations. + */ +static void __iomem *gt_base; +static unsigned long gt_clk_rate; +static int gt_ppi; +static struct clock_event_device __percpu *gt_evt; + +/* + * To get the value from the Global Timer Counter register proceed as follows: + * 1. Read the upper 32-bit timer counter register + * 2. Read the lower 32-bit timer counter register + * 3. Read the upper 32-bit timer counter register again. If the value is + * different to the 32-bit upper value read previously, go back to step 2. + * Otherwise the 64-bit timer counter value is correct. + */ +static u64 gt_counter_read(void) +{ + u64 counter; + u32 lower; + u32 upper, old_upper; + + upper = readl_relaxed(gt_base + GT_COUNTER1); + do { + old_upper = upper; + lower = readl_relaxed(gt_base + GT_COUNTER0); + upper = readl_relaxed(gt_base + GT_COUNTER1); + } while (upper != old_upper); + + counter = upper; + counter <<= 32; + counter |= lower; + return counter; +} + +/** + * To ensure that updates to comparator value register do not set the + * Interrupt Status Register proceed as follows: + * 1. Clear the Comp Enable bit in the Timer Control Register. + * 2. Write the lower 32-bit Comparator Value Register. + * 3. Write the upper 32-bit Comparator Value Register. + * 4. Set the Comp Enable bit and, if necessary, the IRQ enable bit. 
+ */ +static void gt_compare_set(unsigned long delta, int periodic) +{ + u64 counter = gt_counter_read(); + unsigned long ctrl; + + counter += delta; + ctrl = GT_CONTROL_TIMER_ENABLE; + writel(ctrl, gt_base + GT_CONTROL); + writel(lower_32_bits(counter), gt_base + GT_COMP0); + writel(upper_32_bits(counter), gt_base + GT_COMP1); + + if (periodic) { + writel(delta, gt_base + GT_AUTO_INC); + ctrl |= GT_CONTROL_AUTO_INC; + } + + ctrl |= GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE; + writel(ctrl, gt_base + GT_CONTROL); +} + +static void gt_clockevent_set_mode(enum clock_event_mode mode, + struct clock_event_device *clk) +{ + unsigned long ctrl; + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1); + break; + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + ctrl = readl(gt_base + GT_CONTROL); + ctrl &= ~(GT_CONTROL_COMP_ENABLE | + GT_CONTROL_IRQ_ENABLE | GT_CONTROL_AUTO_INC); + writel(ctrl, gt_base + GT_CONTROL); + break; + default: + break; + } +} + +static int gt_clockevent_set_next_event(unsigned long evt, + struct clock_event_device *unused) +{ + gt_compare_set(evt, 0); + return 0; +} + +static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + if (!(readl_relaxed(gt_base + GT_INT_STATUS) & + GT_INT_STATUS_EVENT_FLAG)) + return IRQ_NONE; + + /** + * ERRATA 740657( Global Timer can send 2 interrupts for + * the same event in single-shot mode) + * Workaround: + * Either disable single-shot mode. + * Or + * Modify the Interrupt Handler to avoid the + * offending sequence. This is achieved by clearing + * the Global Timer flag _after_ having incremented + * the Comparator register value to a higher value. 
+ */ + if (evt->mode == CLOCK_EVT_MODE_ONESHOT) + gt_compare_set(ULONG_MAX, 0); + + writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS); + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static int gt_clockevents_init(struct clock_event_device *clk) +{ + int cpu = smp_processor_id(); + + clk->name = "arm_global_timer"; + clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + clk->set_mode = gt_clockevent_set_mode; + clk->set_next_event = gt_clockevent_set_next_event; + clk->cpumask = cpumask_of(cpu); + clk->rating = 300; + clk->irq = gt_ppi; + clockevents_config_and_register(clk, gt_clk_rate, + 1, 0xffffffff); + enable_percpu_irq(clk->irq, IRQ_TYPE_NONE); + return 0; +} + +static void gt_clockevents_stop(struct clock_event_device *clk) +{ + gt_clockevent_set_mode(CLOCK_EVT_MODE_UNUSED, clk); + disable_percpu_irq(clk->irq); +} + +static cycle_t gt_clocksource_read(struct clocksource *cs) +{ + return gt_counter_read(); +} + +static struct clocksource gt_clocksource = { + .name = "arm_global_timer", + .rating = 300, + .read = gt_clocksource_read, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK +static u32 notrace gt_sched_clock_read(void) +{ + return gt_counter_read(); +} +#endif + +static void __init gt_clocksource_init(void) +{ + writel(0, gt_base + GT_CONTROL); + writel(0, gt_base + GT_COUNTER0); + writel(0, gt_base + GT_COUNTER1); + /* enables timer on all the cores */ + writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL); + +#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK + setup_sched_clock(gt_sched_clock_read, 32, gt_clk_rate); +#endif + clocksource_register_hz(&gt_clocksource, gt_clk_rate); +} + +static int gt_cpu_notify(struct notifier_block *self, unsigned long action, + void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + gt_clockevents_init(this_cpu_ptr(gt_evt)); + break; + case CPU_DYING: + gt_clockevents_stop(this_cpu_ptr(gt_evt)); + break; + } + + return NOTIFY_OK; +} +static struct notifier_block gt_cpu_nb = { + .notifier_call = gt_cpu_notify, +}; + +static void __init global_timer_of_register(struct device_node *np) +{ + struct clk *gt_clk; + int err = 0; + + /* + * In r2p0 the comparators for each processor with the global timer + * fire when the timer value is greater than or equal to. In previous + * revisions the comparators fired when the timer value was equal to.
+ */ + if ((read_cpuid_id() & 0xf0000f) < 0x200000) { + pr_warn("global-timer: non support for this cpu version.\n"); + return; + } + + gt_ppi = irq_of_parse_and_map(np, 0); + if (!gt_ppi) { + pr_warn("global-timer: unable to parse irq\n"); + return; + } + + gt_base = of_iomap(np, 0); + if (!gt_base) { + pr_warn("global-timer: invalid base address\n"); + return; + } + + gt_clk = of_clk_get(np, 0); + if (!IS_ERR(gt_clk)) { + err = clk_prepare_enable(gt_clk); + if (err) + goto out_unmap; + } else { + pr_warn("global-timer: clk not found\n"); + err = -EINVAL; + goto out_unmap; + } + + gt_clk_rate = clk_get_rate(gt_clk); + gt_evt = alloc_percpu(struct clock_event_device); + if (!gt_evt) { + pr_warn("global-timer: can't allocate memory\n"); + err = -ENOMEM; + goto out_clk; + } + + err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt, + "gt", gt_evt); + if (err) { + pr_warn("global-timer: can't register interrupt %d (%d)\n", + gt_ppi, err); + goto out_free; + } + + err = register_cpu_notifier(&gt_cpu_nb); + if (err) { + pr_warn("global-timer: unable to register cpu notifier.\n"); + goto out_irq; + } + + /* Immediately configure the timer on the boot CPU */ + gt_clocksource_init(); + gt_clockevents_init(this_cpu_ptr(gt_evt)); + + return; + +out_irq: + free_percpu_irq(gt_ppi, gt_evt); +out_free: + free_percpu(gt_evt); +out_clk: + clk_disable_unprepare(gt_clk); +out_unmap: + iounmap(gt_base); + WARN(err, "ARM Global timer register failed (%d)\n", err); +} + +/* Only tested on r2p2 and r3p0 */ +CLOCKSOURCE_OF_DECLARE(arm_gt, "arm,cortex-a9-global-timer", + global_timer_of_register); diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 766611d29945..07ea7ce900dc 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -28,8 +28,8 @@ #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <asm/irq.h> #define REG_CONTROL 0x00 diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 350f49356458..ba3d85904c9a 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c @@ -103,16 +103,10 @@ static const struct of_device_id bcm_timer_ids[] __initconst = { {}, }; -static void __init kona_timers_init(void) +static void __init kona_timers_init(struct device_node *node) { - struct device_node *node; u32 freq; - node = of_find_matching_node(NULL, bcm_timer_ids); - - if (!node) - panic("No timer"); - if (!of_property_read_u32(node, "clock-frequency", &freq)) arch_timer_rate = freq; else @@ -199,13 +193,12 @@ static struct irqaction kona_timer_irq = { .handler = kona_timer_interrupt, }; -static void __init kona_timer_init(void) +static void __init kona_timer_init(struct device_node *node) { - kona_timers_init(); + kona_timers_init(node); kona_timer_clockevents_init(); setup_irq(timers.tmr_irq, &kona_timer_irq); kona_timer_set_next_event((arch_timer_rate / HZ), NULL); } -CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer", - kona_timer_init); +CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer", kona_timer_init); diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index 685bc60e210a..b2bb3a4bc205 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -21,7 +21,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/slab.h> -#include <linux/clk-provider.h> +#include 
<linux/sched_clock.h> /* * This driver configures the 2 16-bit count-up timers as follows: @@ -51,6 +51,8 @@ #define TTC_CNT_CNTRL_DISABLE_MASK 0x1 +#define TTC_CLK_CNTRL_CSRC_MASK (1 << 5) /* clock source */ + /* * Setup the timers to use pre-scaling, using a fixed value for now that will * work across most input frequency, but it may need to be more dynamic @@ -93,6 +95,8 @@ struct ttc_timer_clockevent { #define to_ttc_timer_clkevent(x) \ container_of(x, struct ttc_timer_clockevent, ce) +static void __iomem *ttc_sched_clock_val_reg; + /** * ttc_set_interval - Set the timer interval value * @@ -154,6 +158,11 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs) TTC_COUNT_VAL_OFFSET); } +static u32 notrace ttc_sched_clock_read(void) +{ + return __raw_readl(ttc_sched_clock_val_reg); +} + /** * ttc_set_next_event - Sets the time interval for next event * @@ -295,6 +304,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base) kfree(ttccs); return; } + + ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; + setup_sched_clock(ttc_sched_clock_read, 16, + clk_get_rate(ttccs->ttc.clk) / PRESCALE); } static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, @@ -396,8 +409,9 @@ static void __init ttc_timer_init(struct device_node *timer) { unsigned int irq; void __iomem *timer_baseaddr; - struct clk *clk; + struct clk *clk_cs, *clk_ce; static int initialized; + int clksel; if (initialized) return; @@ -421,14 +435,24 @@ static void __init ttc_timer_init(struct device_node *timer) BUG(); } - clk = of_clk_get_by_name(timer, "cpu_1x"); - if (IS_ERR(clk)) { + clksel = __raw_readl(timer_baseaddr + TTC_CLK_CNTRL_OFFSET); + clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK); + clk_cs = of_clk_get(timer, clksel); + if (IS_ERR(clk_cs)) { + pr_err("ERROR: timer input clock not found\n"); + BUG(); + } + + clksel = __raw_readl(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET); + clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK); + clk_ce = of_clk_get(timer, clksel); + if (IS_ERR(clk_ce)) { pr_err("ERROR: timer input clock not found\n"); BUG(); } - ttc_setup_clocksource(clk, timer_baseaddr); - ttc_setup_clockevent(clk, timer_baseaddr + 4, irq); + ttc_setup_clocksource(clk_cs, timer_baseaddr); + ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq); } diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c index 54f3d119d99c..a9fd4ad25674 100644 --- a/drivers/clocksource/clksrc-dbx500-prcmu.c +++ b/drivers/clocksource/clksrc-dbx500-prcmu.c @@ -10,12 +10,11 @@ * DBx500-PRCMU Timer * The PRCMU has 5 timers which are available in a always-on * power domain. We use the Timer 4 for our always-on clock - * source on DB8500 and Timer 3 on DB5500. + * source on DB8500. 
*/ #include <linux/clockchips.h> #include <linux/clksrc-dbx500-prcmu.h> - -#include <asm/sched_clock.h> +#include <linux/sched_clock.h> #define RATE_32K 32768 @@ -30,15 +29,14 @@ static void __iomem *clksrc_dbx500_timer_base; -static cycle_t clksrc_dbx500_prcmu_read(struct clocksource *cs) +static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) { + void __iomem *base = clksrc_dbx500_timer_base; u32 count, count2; do { - count = readl(clksrc_dbx500_timer_base + - PRCMU_TIMER_DOWNCOUNT); - count2 = readl(clksrc_dbx500_timer_base + - PRCMU_TIMER_DOWNCOUNT); + count = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT); + count2 = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT); } while (count2 != count); /* Negate because the timer is a decrementing counter */ diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c new file mode 100644 index 000000000000..b3eb582d6a6f --- /dev/null +++ b/drivers/clocksource/dummy_timer.c @@ -0,0 +1,69 @@ +/* + * linux/drivers/clocksource/dummy_timer.c + * + * Copyright (C) 2013 ARM Ltd. + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/init.h> +#include <linux/percpu.h> +#include <linux/cpumask.h> + +static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt); + +static void dummy_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + /* + * Core clockevents code will call this when exchanging timer devices. + * We don't need to do anything here. + */ +} + +static void dummy_timer_setup(void) +{ + int cpu = smp_processor_id(); + struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt); + + evt->name = "dummy_timer"; + evt->features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_DUMMY; + evt->rating = 100; + evt->set_mode = dummy_timer_set_mode; + evt->cpumask = cpumask_of(cpu); + + clockevents_register_device(evt); +} + +static int dummy_timer_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) + dummy_timer_setup(); + + return NOTIFY_OK; +} + +static struct notifier_block dummy_timer_cpu_nb = { + .notifier_call = dummy_timer_cpu_notify, +}; + +static int __init dummy_timer_register(void) +{ + int err = register_cpu_notifier(&dummy_timer_cpu_nb); + if (err) + return err; + + /* We won't get a call on the boot CPU, so register immediately */ + if (num_possible_cpus() > 1) + dummy_timer_setup(); + + return 0; +} +early_initcall(dummy_timer_register); diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index 8c2a35f26d9b..e54ca1062d8e 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -387,15 +387,3 @@ cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) { return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); } - -/** - * dw_apb_clocksource_unregister() - unregister and free a clocksource. - * - * @dw_cs: The clocksource to unregister/free. 
- */ -void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs) -{ - clocksource_unregister(&dw_cs->cs); - - kfree(dw_cs); -} diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index ab09ed3742ee..4cbae4f762b1 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -20,21 +20,43 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> - -#include <asm/mach/time.h> -#include <asm/sched_clock.h> +#include <linux/clk.h> +#include <linux/sched_clock.h> static void timer_get_base_and_rate(struct device_node *np, void __iomem **base, u32 *rate) { + struct clk *timer_clk; + struct clk *pclk; + *base = of_iomap(np, 0); if (!*base) panic("Unable to map regs for %s", np->name); + /* + * Not all implementations use a periphal clock, so don't panic + * if it's not present + */ + pclk = of_clk_get_by_name(np, "pclk"); + if (!IS_ERR(pclk)) + if (clk_prepare_enable(pclk)) + pr_warn("pclk for %s is present, but could not be activated\n", + np->name); + + timer_clk = of_clk_get_by_name(np, "timer"); + if (IS_ERR(timer_clk)) + goto try_clock_freq; + + if (!clk_prepare_enable(timer_clk)) { + *rate = clk_get_rate(timer_clk); + return; + } + +try_clock_freq: if (of_property_read_u32(np, "clock-freq", rate) && of_property_read_u32(np, "clock-frequency", rate)) - panic("No clock-frequency property for %s", np->name); + panic("No clock nor clock-frequency property for %s", np->name); } static void add_clockevent(struct device_node *event_timer) @@ -44,7 +66,7 @@ static void add_clockevent(struct device_node *event_timer) u32 irq, rate; irq = irq_of_parse_and_map(event_timer, 0); - if (irq == NO_IRQ) + if (irq == 0) panic("No IRQ for clock event timer"); timer_get_base_and_rate(event_timer, &iobase, &rate); @@ -57,6 +79,9 @@ static void add_clockevent(struct device_node *event_timer) dw_apb_clockevent_register(ced); } +static void __iomem *sched_io_base; +static u32 sched_rate; + static void add_clocksource(struct device_node *source_timer) { void __iomem *iobase; @@ -71,9 +96,15 @@ static void add_clocksource(struct device_node *source_timer) dw_apb_clocksource_start(cs); dw_apb_clocksource_register(cs); -} -static void __iomem *sched_io_base; + /* + * Fallback to use the clocksource as sched_clock if no separate + * timer is found. sched_io_base then points to the current_value + * register of the clocksource timer. 
+ */ + sched_io_base = iobase + 0x04; + sched_rate = rate; +} static u32 read_sched_clock(void) { @@ -89,39 +120,37 @@ static const struct of_device_id sptimer_ids[] __initconst = { static void init_sched_clock(void) { struct device_node *sched_timer; - u32 rate; sched_timer = of_find_matching_node(NULL, sptimer_ids); - if (!sched_timer) - panic("No RTC for sched clock to use"); + if (sched_timer) { + timer_get_base_and_rate(sched_timer, &sched_io_base, + &sched_rate); + of_node_put(sched_timer); + } - timer_get_base_and_rate(sched_timer, &sched_io_base, &rate); - of_node_put(sched_timer); - - setup_sched_clock(read_sched_clock, 32, rate); + setup_sched_clock(read_sched_clock, 32, sched_rate); } -static const struct of_device_id osctimer_ids[] __initconst = { - { .compatible = "picochip,pc3x2-timer" }, - { .compatible = "snps,dw-apb-timer-osc" }, - {}, -}; - -void __init dw_apb_timer_init(void) +static int num_called; +static void __init dw_apb_timer_init(struct device_node *timer) { - struct device_node *event_timer, *source_timer; - - event_timer = of_find_matching_node(NULL, osctimer_ids); - if (!event_timer) - panic("No timer for clockevent"); - add_clockevent(event_timer); - - source_timer = of_find_matching_node(event_timer, osctimer_ids); - if (!source_timer) - panic("No timer for clocksource"); - add_clocksource(source_timer); - - of_node_put(source_timer); - - init_sched_clock(); + switch (num_called) { + case 0: + pr_debug("%s: found clockevent timer\n", __func__); + add_clockevent(timer); + of_node_put(timer); + break; + case 1: + pr_debug("%s: found clocksource timer\n", __func__); + add_clocksource(timer); + of_node_put(timer); + init_sched_clock(); + break; + default: + break; + } + + num_called++; } +CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 662fcc065821..b2bbc415f120 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -400,19 +400,7 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static struct irqaction mct_tick0_event_irq = { - .name = "mct_tick0_irq", - .flags = IRQF_TIMER | IRQF_NOBALANCING, - .handler = exynos4_mct_tick_isr, -}; - -static struct irqaction mct_tick1_event_irq = { - .name = "mct_tick1_irq", - .flags = IRQF_TIMER | IRQF_NOBALANCING, - .handler = exynos4_mct_tick_isr, -}; - -static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt) +static int exynos4_local_timer_setup(struct clock_event_device *evt) { struct mct_clock_event_device *mevt; unsigned int cpu = smp_processor_id(); @@ -435,16 +423,15 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt) exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); if (mct_int_type == MCT_INT_SPI) { - if (cpu == 0) { - mct_tick0_event_irq.dev_id = mevt; - evt->irq = mct_irqs[MCT_L0_IRQ]; - setup_irq(evt->irq, &mct_tick0_event_irq); - } else { - mct_tick1_event_irq.dev_id = mevt; - evt->irq = mct_irqs[MCT_L1_IRQ]; - setup_irq(evt->irq, &mct_tick1_event_irq); - irq_set_affinity(evt->irq, cpumask_of(1)); + evt->irq = mct_irqs[MCT_L0_IRQ + cpu]; + if (request_irq(evt->irq, exynos4_mct_tick_isr, + IRQF_TIMER | IRQF_NOBALANCING, + evt->name, mevt)) { + pr_err("exynos-mct: cannot register IRQ %d\n", + evt->irq); + return -EIO; } + irq_set_affinity(evt->irq, cpumask_of(cpu)); } else { 
enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); } @@ -454,18 +441,14 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt) static void exynos4_local_timer_stop(struct clock_event_device *evt) { - unsigned int cpu = smp_processor_id(); evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); if (mct_int_type == MCT_INT_SPI) - if (cpu == 0) - remove_irq(evt->irq, &mct_tick0_event_irq); - else - remove_irq(evt->irq, &mct_tick1_event_irq); + free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick)); else disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); } -static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = { +static struct local_timer_ops exynos4_mct_tick_ops = { .setup = exynos4_local_timer_setup, .stop = exynos4_local_timer_stop, }; diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c index ade7513a11d1..9e4db41abe3c 100644 --- a/drivers/clocksource/metag_generic.c +++ b/drivers/clocksource/metag_generic.c @@ -109,7 +109,7 @@ unsigned long long sched_clock(void) return ticks << HARDWARE_TO_NS_SHIFT; } -static void __cpuinit arch_timer_setup(unsigned int cpu) +static void arch_timer_setup(unsigned int cpu) { unsigned int txdivtime; struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); @@ -154,7 +154,7 @@ static void __cpuinit arch_timer_setup(unsigned int cpu) } } -static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, +static int arch_timer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { int cpu = (long)hcpu; @@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata arch_timer_cpu_nb = { +static struct notifier_block arch_timer_cpu_nb = { .notifier_call = arch_timer_cpu_notify, }; @@ -184,6 +184,8 @@ int __init metag_generic_timer_init(void) #ifdef CONFIG_METAG_META21 hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1); #endif + pr_info("Timer frequency: %u Hz\n", hwtimer_freq); + clocksource_register_hz(&clocksource_metag, hwtimer_freq); setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq); diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c new file mode 100644 index 000000000000..5eb2c35932b1 --- /dev/null +++ b/drivers/clocksource/moxart_timer.c @@ -0,0 +1,165 @@ +/* + * MOXA ART SoCs timer handling. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/io.h> +#include <linux/clocksource.h> +#include <linux/bitops.h> + +#define TIMER1_BASE 0x00 +#define TIMER2_BASE 0x10 +#define TIMER3_BASE 0x20 + +#define REG_COUNT 0x0 /* writable */ +#define REG_LOAD 0x4 +#define REG_MATCH1 0x8 +#define REG_MATCH2 0xC + +#define TIMER_CR 0x30 +#define TIMER_INTR_STATE 0x34 +#define TIMER_INTR_MASK 0x38 + +/* + * TIMER_CR flags: + * + * TIMEREG_CR_*_CLOCK 0: PCLK, 1: EXT1CLK + * TIMEREG_CR_*_INT overflow interrupt enable bit + */ +#define TIMEREG_CR_1_ENABLE BIT(0) +#define TIMEREG_CR_1_CLOCK BIT(1) +#define TIMEREG_CR_1_INT BIT(2) +#define TIMEREG_CR_2_ENABLE BIT(3) +#define TIMEREG_CR_2_CLOCK BIT(4) +#define TIMEREG_CR_2_INT BIT(5) +#define TIMEREG_CR_3_ENABLE BIT(6) +#define TIMEREG_CR_3_CLOCK BIT(7) +#define TIMEREG_CR_3_INT BIT(8) +#define TIMEREG_CR_COUNT_UP BIT(9) + +#define TIMER1_ENABLE (TIMEREG_CR_2_ENABLE | TIMEREG_CR_1_ENABLE) +#define TIMER1_DISABLE (TIMEREG_CR_2_ENABLE) + +static void __iomem *base; +static unsigned int clock_count_per_tick; + +static void moxart_clkevt_mode(enum clock_event_mode mode, + struct clock_event_device *clk) +{ + switch (mode) { + case CLOCK_EVT_MODE_RESUME: + case CLOCK_EVT_MODE_ONESHOT: + writel(TIMER1_DISABLE, base + TIMER_CR); + writel(~0, base + TIMER1_BASE + REG_LOAD); + break; + case CLOCK_EVT_MODE_PERIODIC: + writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD); + writel(TIMER1_ENABLE, base + TIMER_CR); + break; + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + default: + writel(TIMER1_DISABLE, base + TIMER_CR); + break; + } +} + +static int moxart_clkevt_next_event(unsigned long cycles, + struct clock_event_device *unused) +{ + u32 u; + + writel(TIMER1_DISABLE, base + TIMER_CR); + + u = readl(base + TIMER1_BASE + REG_COUNT) - cycles; + writel(u, base + TIMER1_BASE + REG_MATCH1); + + writel(TIMER1_ENABLE, base + TIMER_CR); + + return 0; +} + +static struct clock_event_device moxart_clockevent = { + .name = "moxart_timer", + .rating = 200, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_mode = moxart_clkevt_mode, + .set_next_event = moxart_clkevt_next_event, +}; + +static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + evt->event_handler(evt); + return IRQ_HANDLED; +} + +static struct irqaction moxart_timer_irq = { + .name = "moxart-timer", + .flags = IRQF_TIMER, + .handler = moxart_timer_interrupt, + .dev_id = &moxart_clockevent, +}; + +static void __init moxart_timer_init(struct device_node *node) +{ + int ret, irq; + unsigned long pclk; + struct clk *clk; + + base = of_iomap(node, 0); + if (!base) + panic("%s: of_iomap failed\n", node->full_name); + + irq = irq_of_parse_and_map(node, 0); + if (irq <= 0) + panic("%s: irq_of_parse_and_map failed\n", node->full_name); + + ret = setup_irq(irq, &moxart_timer_irq); + if (ret) + panic("%s: setup_irq failed\n", node->full_name); + + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) + panic("%s: of_clk_get failed\n", node->full_name); + + pclk = clk_get_rate(clk); + + if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT, + "moxart_timer", pclk, 200, 32, + clocksource_mmio_readl_down)) + panic("%s: clocksource_mmio_init failed\n", node->full_name); + + clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ); + + writel(~0, base 
+ TIMER2_BASE + REG_LOAD); + writel(TIMEREG_CR_2_ENABLE, base + TIMER_CR); + + moxart_clockevent.cpumask = cpumask_of(0); + moxart_clockevent.irq = irq; + + /* + * documentation is not publicly available: + * min_delta / max_delta obtained by trial-and-error, + * max_delta 0xfffffffe should be ok because count + * register size is u32 + */ + clockevents_config_and_register(&moxart_clockevent, pclk, + 0x4, 0xfffffffe); +} +CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init); diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c index 02af4204af86..0f5e65f74dc3 100644 --- a/drivers/clocksource/mxs_timer.c +++ b/drivers/clocksource/mxs_timer.c @@ -29,9 +29,9 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/stmp_device.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> -#include <asm/sched_clock.h> /* * There are 2 versions of the timrot on Freescale MXS-based SoCs. diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c index e405531e1cc5..7d2c2c56f73c 100644 --- a/drivers/clocksource/nomadik-mtu.c +++ b/drivers/clocksource/nomadik-mtu.c @@ -13,13 +13,16 @@ #include <linux/io.h> #include <linux/clockchips.h> #include <linux/clocksource.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> #include <linux/clk.h> #include <linux/jiffies.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/platform_data/clocksource-nomadik-mtu.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> -#include <asm/sched_clock.h> /* * The MTU device hosts four different counters, with 4 set of @@ -188,22 +191,15 @@ static struct irqaction nmdk_timer_irq = { .dev_id = &nmdk_clkevt, }; -void __init nmdk_timer_init(void __iomem *base, int irq) +static void __init __nmdk_timer_init(void __iomem *base, int irq, + struct clk *pclk, struct clk *clk) { unsigned long rate; - struct clk *clk0, *pclk0; mtu_base = base; - pclk0 = clk_get_sys("mtu0", "apb_pclk"); - BUG_ON(IS_ERR(pclk0)); - BUG_ON(clk_prepare(pclk0) < 0); - BUG_ON(clk_enable(pclk0) < 0); - - clk0 = clk_get_sys("mtu0", NULL); - BUG_ON(IS_ERR(clk0)); - BUG_ON(clk_prepare(clk0) < 0); - BUG_ON(clk_enable(clk0) < 0); + BUG_ON(clk_prepare_enable(pclk)); + BUG_ON(clk_prepare_enable(clk)); /* * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz @@ -213,7 +209,7 @@ void __init nmdk_timer_init(void __iomem *base, int irq) * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer * with 16 gives too low timer resolution. 
*/ - rate = clk_get_rate(clk0); + rate = clk_get_rate(clk); if (rate > 32000000) { rate /= 16; clk_prescale = MTU_CRn_PRESCALE_16; @@ -247,3 +243,43 @@ void __init nmdk_timer_init(void __iomem *base, int irq) mtu_delay_timer.freq = rate; register_current_timer_delay(&mtu_delay_timer); } + +void __init nmdk_timer_init(void __iomem *base, int irq) +{ + struct clk *clk0, *pclk0; + + pclk0 = clk_get_sys("mtu0", "apb_pclk"); + BUG_ON(IS_ERR(pclk0)); + clk0 = clk_get_sys("mtu0", NULL); + BUG_ON(IS_ERR(clk0)); + + __nmdk_timer_init(base, irq, pclk0, clk0); +} + +static void __init nmdk_timer_of_init(struct device_node *node) +{ + struct clk *pclk; + struct clk *clk; + void __iomem *base; + int irq; + + base = of_iomap(node, 0); + if (!base) + panic("Can't remap registers"); + + pclk = of_clk_get_by_name(node, "apb_pclk"); + if (IS_ERR(pclk)) + panic("could not get apb_pclk"); + + clk = of_clk_get_by_name(node, "timclk"); + if (IS_ERR(clk)) + panic("could not get timclk"); + + irq = irq_of_parse_and_map(node, 0); + if (irq <= 0) + panic("Can't parse IRQ"); + + __nmdk_timer_init(base, irq, pclk, clk); +} +CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu", + nmdk_timer_of_init); diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c index 0234c8d2c8f2..584b5472eea3 100644 --- a/drivers/clocksource/samsung_pwm_timer.c +++ b/drivers/clocksource/samsung_pwm_timer.c @@ -21,10 +21,10 @@ #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/sched_clock.h> #include <clocksource/samsung_pwm.h> -#include <asm/sched_clock.h> /* * Clocksource driver diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index d4674e78ef35..8ead0258740a 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -19,42 +19,83 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqreturn.h> +#include <linux/sched_clock.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #define TIMER_IRQ_EN_REG 0x00 -#define TIMER_IRQ_EN(val) (1 << val) +#define TIMER_IRQ_EN(val) BIT(val) #define TIMER_IRQ_ST_REG 0x04 #define TIMER_CTL_REG(val) (0x10 * val + 0x10) -#define TIMER_CTL_ENABLE (1 << 0) -#define TIMER_CTL_AUTORELOAD (1 << 1) -#define TIMER_CTL_ONESHOT (1 << 7) -#define TIMER_INTVAL_REG(val) (0x10 * val + 0x14) -#define TIMER_CNTVAL_REG(val) (0x10 * val + 0x18) - -#define TIMER_SCAL 16 +#define TIMER_CTL_ENABLE BIT(0) +#define TIMER_CTL_RELOAD BIT(1) +#define TIMER_CTL_CLK_SRC(val) (((val) & 0x3) << 2) +#define TIMER_CTL_CLK_SRC_OSC24M (1) +#define TIMER_CTL_CLK_PRES(val) (((val) & 0x7) << 4) +#define TIMER_CTL_ONESHOT BIT(7) +#define TIMER_INTVAL_REG(val) (0x10 * (val) + 0x14) +#define TIMER_CNTVAL_REG(val) (0x10 * (val) + 0x18) static void __iomem *timer_base; +static u32 ticks_per_jiffy; + +/* + * When we disable a timer, we need to wait at least for 2 cycles of + * the timer source clock. We will use for that the clocksource timer + * that is already setup and runs at the same frequency than the other + * timers, and we never will be disabled. 
+ */ +static void sun4i_clkevt_sync(void) +{ + u32 old = readl(timer_base + TIMER_CNTVAL_REG(1)); + + while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < 3) + cpu_relax(); +} + +static void sun4i_clkevt_time_stop(u8 timer) +{ + u32 val = readl(timer_base + TIMER_CTL_REG(timer)); + writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer)); + sun4i_clkevt_sync(); +} + +static void sun4i_clkevt_time_setup(u8 timer, unsigned long delay) +{ + writel(delay, timer_base + TIMER_INTVAL_REG(timer)); +} + +static void sun4i_clkevt_time_start(u8 timer, bool periodic) +{ + u32 val = readl(timer_base + TIMER_CTL_REG(timer)); + + if (periodic) + val &= ~TIMER_CTL_ONESHOT; + else + val |= TIMER_CTL_ONESHOT; + + writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, + timer_base + TIMER_CTL_REG(timer)); +} static void sun4i_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *clk) { - u32 u = readl(timer_base + TIMER_CTL_REG(0)); - switch (mode) { case CLOCK_EVT_MODE_PERIODIC: - u &= ~(TIMER_CTL_ONESHOT); - writel(u | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(0)); + sun4i_clkevt_time_stop(0); + sun4i_clkevt_time_setup(0, ticks_per_jiffy); + sun4i_clkevt_time_start(0, true); break; - case CLOCK_EVT_MODE_ONESHOT: - writel(u | TIMER_CTL_ONESHOT, timer_base + TIMER_CTL_REG(0)); + sun4i_clkevt_time_stop(0); + sun4i_clkevt_time_start(0, false); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: default: - writel(u & ~(TIMER_CTL_ENABLE), timer_base + TIMER_CTL_REG(0)); + sun4i_clkevt_time_stop(0); break; } } @@ -62,10 +103,9 @@ static void sun4i_clkevt_mode(enum clock_event_mode mode, static int sun4i_clkevt_next_event(unsigned long evt, struct clock_event_device *unused) { - u32 u = readl(timer_base + TIMER_CTL_REG(0)); - writel(evt, timer_base + TIMER_CNTVAL_REG(0)); - writel(u | TIMER_CTL_ENABLE | TIMER_CTL_AUTORELOAD, - timer_base + TIMER_CTL_REG(0)); + sun4i_clkevt_time_stop(0); + sun4i_clkevt_time_setup(0, evt); + sun4i_clkevt_time_start(0, false); return 0; } @@ -96,6 +136,11 @@ static struct irqaction sun4i_timer_irq = { .dev_id = &sun4i_clockevent, }; +static u32 sun4i_timer_sched_read(void) +{ + return ~readl(timer_base + TIMER_CNTVAL_REG(1)); +} + static void __init sun4i_timer_init(struct device_node *node) { unsigned long rate = 0; @@ -114,22 +159,23 @@ static void __init sun4i_timer_init(struct device_node *node) clk = of_clk_get(node, 0); if (IS_ERR(clk)) panic("Can't get timer clock"); + clk_prepare_enable(clk); rate = clk_get_rate(clk); - writel(rate / (TIMER_SCAL * HZ), - timer_base + TIMER_INTVAL_REG(0)); + writel(~0, timer_base + TIMER_INTVAL_REG(1)); + writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD | + TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), + timer_base + TIMER_CTL_REG(1)); + + setup_sched_clock(sun4i_timer_sched_read, 32, rate); + clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, + rate, 300, 32, clocksource_mmio_readl_down); - /* set clock source to HOSC, 16 pre-division */ - val = readl(timer_base + TIMER_CTL_REG(0)); - val &= ~(0x07 << 4); - val &= ~(0x03 << 2); - val |= (4 << 4) | (1 << 2); - writel(val, timer_base + TIMER_CTL_REG(0)); + ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); - /* set mode to auto reload */ - val = readl(timer_base + TIMER_CTL_REG(0)); - writel(val | TIMER_CTL_AUTORELOAD, timer_base + TIMER_CTL_REG(0)); + writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), + timer_base + TIMER_CTL_REG(0)); ret = setup_irq(irq, &sun4i_timer_irq); if (ret) @@ -141,8 +187,8 @@ static void __init sun4i_timer_init(struct 
device_node *node) sun4i_clockevent.cpumask = cpumask_of(0); - clockevents_config_and_register(&sun4i_clockevent, rate / TIMER_SCAL, - 0x1, 0xff); + clockevents_config_and_register(&sun4i_clockevent, rate, 0x1, + 0xffffffff); } CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer", sun4i_timer_init); diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c index ae877b021b54..93961703b887 100644 --- a/drivers/clocksource/tegra20_timer.c +++ b/drivers/clocksource/tegra20_timer.c @@ -26,10 +26,10 @@ #include <linux/io.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> #include <asm/smp_twd.h> -#include <asm/sched_clock.h> #define RTC_SECONDS 0x08 #define RTC_SHADOW_SECONDS 0x0c diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 47a673070d70..1b04b7e1d39b 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -27,8 +27,8 @@ #include <linux/of_address.h> #include <linux/irq.h> #include <linux/module.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <asm/localtimer.h> #include <linux/percpu.h> /* @@ -167,7 +167,7 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) /* * Setup the local clock events for a CPU. */ -static int __cpuinit armada_370_xp_timer_setup(struct clock_event_device *evt) +static int armada_370_xp_timer_setup(struct clock_event_device *evt) { u32 u; int cpu = smp_processor_id(); @@ -205,7 +205,7 @@ static void armada_370_xp_timer_stop(struct clock_event_device *evt) disable_percpu_irq(evt->irq); } -static struct local_timer_ops armada_370_xp_local_timer_ops __cpuinitdata = { +static struct local_timer_ops armada_370_xp_local_timer_ops = { .setup = armada_370_xp_timer_setup, .stop = armada_370_xp_timer_stop, }; diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c new file mode 100644 index 000000000000..9c7f018a67ca --- /dev/null +++ b/drivers/clocksource/time-orion.c @@ -0,0 +1,150 @@ +/* + * Marvell Orion SoC timer handling. + * + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * Timer 0 is used as free-running clocksource, while timer 1 is + * used as clock_event_device. + */ + +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/spinlock.h> +#include <linux/sched_clock.h> + +#define TIMER_CTRL 0x00 +#define TIMER0_EN BIT(0) +#define TIMER0_RELOAD_EN BIT(1) +#define TIMER1_EN BIT(2) +#define TIMER1_RELOAD_EN BIT(3) +#define TIMER0_RELOAD 0x10 +#define TIMER0_VAL 0x14 +#define TIMER1_RELOAD 0x18 +#define TIMER1_VAL 0x1c + +#define ORION_ONESHOT_MIN 1 +#define ORION_ONESHOT_MAX 0xfffffffe + +static void __iomem *timer_base; +static DEFINE_SPINLOCK(timer_ctrl_lock); + +/* + * Thread-safe access to TIMER_CTRL register + * (shared with watchdog timer) + */ +void orion_timer_ctrl_clrset(u32 clr, u32 set) +{ + spin_lock(&timer_ctrl_lock); + writel((readl(timer_base + TIMER_CTRL) & ~clr) | set, + timer_base + TIMER_CTRL); + spin_unlock(&timer_ctrl_lock); +} +EXPORT_SYMBOL(orion_timer_ctrl_clrset); + +/* + * Free-running clocksource handling. 
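Both the sun4i rework above and the new Orion driver use the same free-running, down-counting clocksource idiom: load the counter with ~0 so it wraps over the full 32-bit range, register it through clocksource_mmio_init() with the clocksource_mmio_readl_down() accessor, and give sched_clock a read hook that returns the bitwise complement so the value appears to count up. A stripped-down sketch of that idiom follows; the register offsets, bit names and rating are placeholders, not values taken from either driver.

#include <linux/bitops.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched_clock.h>

#define FOO_LOAD	0x00	/* reload value, hypothetical offset */
#define FOO_VAL		0x04	/* current count, hypothetical offset */
#define FOO_CTRL	0x08
#define FOO_ENABLE	BIT(0)
#define FOO_RELOAD	BIT(1)

static void __iomem *foo_base;

/* the counter decrements, so complement it to get a monotonically increasing value */
static u32 notrace foo_read_sched_clock(void)
{
	return ~readl(foo_base + FOO_VAL);
}

static void __init foo_clocksource_init(unsigned long rate)
{
	/* free-run over the whole 32-bit range: reload from 0xffffffff */
	writel(~0, foo_base + FOO_LOAD);
	writel(FOO_ENABLE | FOO_RELOAD, foo_base + FOO_CTRL);

	/* the clocksource core handles the down-count via the readl_down accessor */
	clocksource_mmio_init(foo_base + FOO_VAL, "foo_clocksource",
			      rate, 300, 32, clocksource_mmio_readl_down);

	/* a 32-bit counter running at "rate" Hz drives sched_clock */
	setup_sched_clock(foo_read_sched_clock, 32, rate);
}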
+ */ +static u32 notrace orion_read_sched_clock(void) +{ + return ~readl(timer_base + TIMER0_VAL); +} + +/* + * Clockevent handling. + */ +static u32 ticks_per_jiffy; + +static int orion_clkevt_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + /* setup and enable one-shot timer */ + writel(delta, timer_base + TIMER1_VAL); + orion_timer_ctrl_clrset(TIMER1_RELOAD_EN, TIMER1_EN); + + return 0; +} + +static void orion_clkevt_mode(enum clock_event_mode mode, + struct clock_event_device *dev) +{ + if (mode == CLOCK_EVT_MODE_PERIODIC) { + /* setup and enable periodic timer at 1/HZ intervals */ + writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD); + writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); + orion_timer_ctrl_clrset(0, TIMER1_RELOAD_EN | TIMER1_EN); + } else { + /* disable timer */ + orion_timer_ctrl_clrset(TIMER1_RELOAD_EN | TIMER1_EN, 0); + } +} + +static struct clock_event_device orion_clkevt = { + .name = "orion_event", + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, + .shift = 32, + .rating = 300, + .set_next_event = orion_clkevt_next_event, + .set_mode = orion_clkevt_mode, +}; + +static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id) +{ + orion_clkevt.event_handler(&orion_clkevt); + return IRQ_HANDLED; +} + +static struct irqaction orion_clkevt_irq = { + .name = "orion_event", + .flags = IRQF_TIMER, + .handler = orion_clkevt_irq_handler, +}; + +static void __init orion_timer_init(struct device_node *np) +{ + struct clk *clk; + int irq; + + /* timer registers are shared with watchdog timer */ + timer_base = of_iomap(np, 0); + if (!timer_base) + panic("%s: unable to map resource\n", np->name); + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) + panic("%s: unable to get clk\n", np->name); + clk_prepare_enable(clk); + + /* we are only interested in timer1 irq */ + irq = irq_of_parse_and_map(np, 1); + if (irq <= 0) + panic("%s: unable to parse timer1 irq\n", np->name); + + /* setup timer0 as free-running clocksource */ + writel(~0, timer_base + TIMER0_VAL); + writel(~0, timer_base + TIMER0_RELOAD); + orion_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | TIMER0_EN); + clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", + clk_get_rate(clk), 300, 32, + clocksource_mmio_readl_down); + setup_sched_clock(orion_read_sched_clock, 32, clk_get_rate(clk)); + + /* setup timer1 as clockevent timer */ + if (setup_irq(irq, &orion_clkevt_irq)) + panic("%s: unable to setup irq\n", np->name); + + ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; + orion_clkevt.cpumask = cpumask_of(0); + orion_clkevt.irq = irq; + clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk), + ORION_ONESHOT_MIN, ORION_ONESHOT_MAX); +} +CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init); diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c index 97738dbf3e3b..62876baa3ab9 100644 --- a/drivers/clocksource/timer-marco.c +++ b/drivers/clocksource/timer-marco.c @@ -17,7 +17,7 @@ #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> -#include <asm/sched_clock.h> +#include <linux/sched_clock.h> #include <asm/localtimer.h> #include <asm/mach/time.h> @@ -184,7 +184,7 @@ static struct irqaction sirfsoc_timer1_irq = { .handler = sirfsoc_timer_interrupt, }; -static int __cpuinit sirfsoc_local_timer_setup(struct clock_event_device *ce) +static int sirfsoc_local_timer_setup(struct clock_event_device *ce) { /* Use existing clock_event for cpu 0 */ if (!smp_processor_id()) @@ -216,7 +216,7 @@ 
static void sirfsoc_local_timer_stop(struct clock_event_device *ce) remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); } -static struct local_timer_ops sirfsoc_local_timer_ops __cpuinitdata = { +static struct local_timer_ops sirfsoc_local_timer_ops = { .setup = sirfsoc_local_timer_setup, .stop = sirfsoc_local_timer_stop, }; diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c index 760882665d7a..ef3cfb269d8b 100644 --- a/drivers/clocksource/timer-prima2.c +++ b/drivers/clocksource/timer-prima2.c @@ -18,7 +18,7 @@ #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> -#include <asm/sched_clock.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> #define SIRFSOC_TIMER_COUNTER_LO 0x0000 diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c new file mode 100644 index 000000000000..587e0202a70b --- /dev/null +++ b/drivers/clocksource/vf_pit_timer.c @@ -0,0 +1,194 @@ +/* + * Copyright 2012-2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + */ + +#include <linux/interrupt.h> +#include <linux/clockchips.h> +#include <linux/clk.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> + +/* + * Each pit takes 0x10 Bytes register space + */ +#define PITMCR 0x00 +#define PIT0_OFFSET 0x100 +#define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n)) +#define PITLDVAL 0x00 +#define PITCVAL 0x04 +#define PITTCTRL 0x08 +#define PITTFLG 0x0c + +#define PITMCR_MDIS (0x1 << 1) + +#define PITTCTRL_TEN (0x1 << 0) +#define PITTCTRL_TIE (0x1 << 1) +#define PITCTRL_CHN (0x1 << 2) + +#define PITTFLG_TIF 0x1 + +static void __iomem *clksrc_base; +static void __iomem *clkevt_base; +static unsigned long cycle_per_jiffy; + +static inline void pit_timer_enable(void) +{ + __raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL); +} + +static inline void pit_timer_disable(void) +{ + __raw_writel(0, clkevt_base + PITTCTRL); +} + +static inline void pit_irq_acknowledge(void) +{ + __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); +} + +static unsigned int pit_read_sched_clock(void) +{ + return __raw_readl(clksrc_base + PITCVAL); +} + +static int __init pit_clocksource_init(unsigned long rate) +{ + /* set the max load value and start the clock source counter */ + __raw_writel(0, clksrc_base + PITTCTRL); + __raw_writel(~0UL, clksrc_base + PITLDVAL); + __raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL); + + setup_sched_clock(pit_read_sched_clock, 32, rate); + return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate, + 300, 32, clocksource_mmio_readl_down); +} + +static int pit_set_next_event(unsigned long delta, + struct clock_event_device *unused) +{ + /* + * set a new value to PITLDVAL register will not restart the timer, + * to abort the current cycle and start a timer period with the new + * value, the timer must be disabled and enabled again. + * and the PITLAVAL should be set to delta minus one according to pit + * hardware requirement. 
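The Orion driver above serializes updates to the control register it shares with the watchdog through a small clear/set helper; the same pattern generalizes to any register written by more than one driver. A minimal sketch is shown below, with a hypothetical register name and lock. Unlike the plain spin_lock used in time-orion.c, the sketch uses the irqsave variant, a conservative choice when the register may be touched from both interrupt and process context.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define FOO_TIMER_CTRL	0x00	/* hypothetical control register shared with a watchdog */

static void __iomem *foo_timer_base;
static DEFINE_SPINLOCK(foo_ctrl_lock);

/* atomically clear and set bits in the shared control register */
static void foo_timer_ctrl_clrset(u32 clr, u32 set)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&foo_ctrl_lock, flags);
	val = readl(foo_timer_base + FOO_TIMER_CTRL);
	val &= ~clr;
	val |= set;
	writel(val, foo_timer_base + FOO_TIMER_CTRL);
	spin_unlock_irqrestore(&foo_ctrl_lock, flags);
}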
+ */ + pit_timer_disable(); + __raw_writel(delta - 1, clkevt_base + PITLDVAL); + pit_timer_enable(); + + return 0; +} + +static void pit_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + pit_set_next_event(cycle_per_jiffy, evt); + break; + default: + break; + } +} + +static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + pit_irq_acknowledge(); + + /* + * pit hardware doesn't support oneshot, it will generate an interrupt + * and reload the counter value from PITLDVAL when PITCVAL reach zero, + * and start the counter again. So software need to disable the timer + * to stop the counter loop in ONESHOT mode. + */ + if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT)) + pit_timer_disable(); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct clock_event_device clockevent_pit = { + .name = "VF pit timer", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_mode = pit_set_mode, + .set_next_event = pit_set_next_event, + .rating = 300, +}; + +static struct irqaction pit_timer_irq = { + .name = "VF pit timer", + .flags = IRQF_TIMER | IRQF_IRQPOLL, + .handler = pit_timer_interrupt, + .dev_id = &clockevent_pit, +}; + +static int __init pit_clockevent_init(unsigned long rate, int irq) +{ + __raw_writel(0, clkevt_base + PITTCTRL); + __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); + + BUG_ON(setup_irq(irq, &pit_timer_irq)); + + clockevent_pit.cpumask = cpumask_of(0); + clockevent_pit.irq = irq; + /* + * The value for the LDVAL register trigger is calculated as: + * LDVAL trigger = (period / clock period) - 1 + * The pit is a 32-bit down count timer, when the conter value + * reaches 0, it will generate an interrupt, thus the minimal + * LDVAL trigger value is 1. And then the min_delta is + * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit. + */ + clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff); + + return 0; +} + +static void __init pit_timer_init(struct device_node *np) +{ + struct clk *pit_clk; + void __iomem *timer_base; + unsigned long clk_rate; + int irq; + + timer_base = of_iomap(np, 0); + BUG_ON(!timer_base); + + /* + * PIT0 and PIT1 can be chained to build a 64-bit timer, + * so choose PIT2 as clocksource, PIT3 as clockevent device, + * and leave PIT0 and PIT1 unused for anyone else who needs them. + */ + clksrc_base = timer_base + PITn_OFFSET(2); + clkevt_base = timer_base + PITn_OFFSET(3); + + irq = irq_of_parse_and_map(np, 0); + BUG_ON(irq <= 0); + + pit_clk = of_clk_get(np, 0); + BUG_ON(IS_ERR(pit_clk)); + + BUG_ON(clk_prepare_enable(pit_clk)); + + clk_rate = clk_get_rate(pit_clk); + cycle_per_jiffy = clk_rate / (HZ); + + /* enable the pit module */ + __raw_writel(~PITMCR_MDIS, timer_base + PITMCR); + + BUG_ON(pit_clocksource_init(clk_rate)); + + pit_clockevent_init(clk_rate, irq); +} +CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init); diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c new file mode 100644 index 000000000000..ca81809d159d --- /dev/null +++ b/drivers/clocksource/zevio-timer.c @@ -0,0 +1,215 @@ +/* + * linux/drivers/clocksource/zevio-timer.c + * + * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. 
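To make the LDVAL arithmetic in pit_clockevent_init() concrete: the PIT counts down from LDVAL to 0 and fires on the transition to 0, so a period of N input clocks needs LDVAL = N - 1, and the smallest usable trigger value of 1 is why the driver registers min_delta = 2. The sketch below works through one periodic tick, reusing the PITLDVAL/PITTCTRL definitions from vf_pit_timer.c above and assuming, purely for illustration, a 66 MHz input clock and HZ = 100.

#include <linux/io.h>

#define EXAMPLE_PIT_RATE	66000000UL	/* Hz, illustrative only */
#define EXAMPLE_HZ		100

/* program one 10 ms periodic tick on a PIT channel mapped at pit_base */
static void example_pit_periodic_setup(void __iomem *pit_base)
{
	unsigned long cycle_per_jiffy = EXAMPLE_PIT_RATE / EXAMPLE_HZ;	/* 660000 cycles */

	/* the counter only picks up a new LDVAL after being disabled and re-enabled */
	__raw_writel(0, pit_base + PITTCTRL);				/* stop */
	__raw_writel(cycle_per_jiffy - 1, pit_base + PITLDVAL);		/* 659999 -> 10 ms period */
	__raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, pit_base + PITTCTRL);	/* restart with irq */
}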
+ * + */ + +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/slab.h> + +#define IO_CURRENT_VAL 0x00 +#define IO_DIVIDER 0x04 +#define IO_CONTROL 0x08 + +#define IO_TIMER1 0x00 +#define IO_TIMER2 0x0C + +#define IO_MATCH_BEGIN 0x18 +#define IO_MATCH(x) (IO_MATCH_BEGIN + ((x) << 2)) + +#define IO_INTR_STS 0x00 +#define IO_INTR_ACK 0x00 +#define IO_INTR_MSK 0x04 + +#define CNTL_STOP_TIMER (1 << 4) +#define CNTL_RUN_TIMER (0 << 4) + +#define CNTL_INC (1 << 3) +#define CNTL_DEC (0 << 3) + +#define CNTL_TOZERO 0 +#define CNTL_MATCH(x) ((x) + 1) +#define CNTL_FOREVER 7 + +/* There are 6 match registers but we only use one. */ +#define TIMER_MATCH 0 + +#define TIMER_INTR_MSK (1 << (TIMER_MATCH)) +#define TIMER_INTR_ALL 0x3F + +struct zevio_timer { + void __iomem *base; + void __iomem *timer1, *timer2; + void __iomem *interrupt_regs; + + struct clk *clk; + struct clock_event_device clkevt; + struct irqaction clkevt_irq; + + char clocksource_name[64]; + char clockevent_name[64]; +}; + +static int zevio_timer_set_event(unsigned long delta, + struct clock_event_device *dev) +{ + struct zevio_timer *timer = container_of(dev, struct zevio_timer, + clkevt); + + writel(delta, timer->timer1 + IO_CURRENT_VAL); + writel(CNTL_RUN_TIMER | CNTL_DEC | CNTL_MATCH(TIMER_MATCH), + timer->timer1 + IO_CONTROL); + + return 0; +} + +static void zevio_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *dev) +{ + struct zevio_timer *timer = container_of(dev, struct zevio_timer, + clkevt); + + switch (mode) { + case CLOCK_EVT_MODE_RESUME: + case CLOCK_EVT_MODE_ONESHOT: + /* Enable timer interrupts */ + writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK); + writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + /* Disable timer interrupts */ + writel(0, timer->interrupt_regs + IO_INTR_MSK); + writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); + /* Stop timer */ + writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); + break; + case CLOCK_EVT_MODE_PERIODIC: + default: + /* Unsupported */ + break; + } +} + +static irqreturn_t zevio_timer_interrupt(int irq, void *dev_id) +{ + struct zevio_timer *timer = dev_id; + u32 intr; + + intr = readl(timer->interrupt_regs + IO_INTR_ACK); + if (!(intr & TIMER_INTR_MSK)) + return IRQ_NONE; + + writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_ACK); + writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); + + if (timer->clkevt.event_handler) + timer->clkevt.event_handler(&timer->clkevt); + + return IRQ_HANDLED; +} + +static int __init zevio_timer_add(struct device_node *node) +{ + struct zevio_timer *timer; + struct resource res; + int irqnr, ret; + + timer = kzalloc(sizeof(*timer), GFP_KERNEL); + if (!timer) + return -ENOMEM; + + timer->base = of_iomap(node, 0); + if (!timer->base) { + ret = -EINVAL; + goto error_free; + } + timer->timer1 = timer->base + IO_TIMER1; + timer->timer2 = timer->base + IO_TIMER2; + + timer->clk = of_clk_get(node, 0); + if (IS_ERR(timer->clk)) { + ret = PTR_ERR(timer->clk); + pr_err("Timer clock not found! 
(error %d)\n", ret); + goto error_unmap; + } + + timer->interrupt_regs = of_iomap(node, 1); + irqnr = irq_of_parse_and_map(node, 0); + + of_address_to_resource(node, 0, &res); + scnprintf(timer->clocksource_name, sizeof(timer->clocksource_name), + "%llx.%s_clocksource", + (unsigned long long)res.start, node->name); + + scnprintf(timer->clockevent_name, sizeof(timer->clockevent_name), + "%llx.%s_clockevent", + (unsigned long long)res.start, node->name); + + if (timer->interrupt_regs && irqnr) { + timer->clkevt.name = timer->clockevent_name; + timer->clkevt.set_next_event = zevio_timer_set_event; + timer->clkevt.set_mode = zevio_timer_set_mode; + timer->clkevt.rating = 200; + timer->clkevt.cpumask = cpu_all_mask; + timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT; + timer->clkevt.irq = irqnr; + + writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); + writel(0, timer->timer1 + IO_DIVIDER); + + /* Start with timer interrupts disabled */ + writel(0, timer->interrupt_regs + IO_INTR_MSK); + writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); + + /* Interrupt to occur when timer value matches 0 */ + writel(0, timer->base + IO_MATCH(TIMER_MATCH)); + + timer->clkevt_irq.name = timer->clockevent_name; + timer->clkevt_irq.handler = zevio_timer_interrupt; + timer->clkevt_irq.dev_id = timer; + timer->clkevt_irq.flags = IRQF_TIMER | IRQF_IRQPOLL; + + setup_irq(irqnr, &timer->clkevt_irq); + + clockevents_config_and_register(&timer->clkevt, + clk_get_rate(timer->clk), 0x0001, 0xffff); + pr_info("Added %s as clockevent\n", timer->clockevent_name); + } + + writel(CNTL_STOP_TIMER, timer->timer2 + IO_CONTROL); + writel(0, timer->timer2 + IO_CURRENT_VAL); + writel(0, timer->timer2 + IO_DIVIDER); + writel(CNTL_RUN_TIMER | CNTL_FOREVER | CNTL_INC, + timer->timer2 + IO_CONTROL); + + clocksource_mmio_init(timer->timer2 + IO_CURRENT_VAL, + timer->clocksource_name, + clk_get_rate(timer->clk), + 200, 16, + clocksource_mmio_readw_up); + + pr_info("Added %s as clocksource\n", timer->clocksource_name); + + return 0; +error_unmap: + iounmap(timer->base); +error_free: + kfree(timer); + return ret; +} + +CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add); |
