Diffstat (limited to 'include/linux/nmi.h')
| -rw-r--r-- | include/linux/nmi.h | 117 |
1 file changed, 117 insertions, 0 deletions
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
new file mode 100644
index 000000000000..dbda77dc510c
--- /dev/null
+++ b/include/linux/nmi.h
@@ -0,0 +1,117 @@
+/*
+ * linux/include/linux/nmi.h
+ */
+#ifndef LINUX_NMI_H
+#define LINUX_NMI_H
+
+#include <linux/sched.h>
+#include <asm/irq.h>
+
+/**
+ * touch_nmi_watchdog - restart NMI watchdog timeout.
+ *
+ * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
+ * may be used to reset the timeout - for code which intentionally
+ * disables interrupts for a long time. This call is stateless.
+ */
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
+#include <asm/nmi.h>
+#endif
+
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+extern void touch_nmi_watchdog(void);
+#else
+static inline void touch_nmi_watchdog(void)
+{
+	touch_softlockup_watchdog();
+}
+#endif
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR)
+extern void hardlockup_detector_disable(void);
+#else
+static inline void hardlockup_detector_disable(void) {}
+#endif
+
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	#if defined(CONFIG_ARM64)
+	arch_trigger_all_cpu_backtrace();
+	#else
+	arch_trigger_all_cpu_backtrace(true);
+	#endif
+
+	return true;
+}
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	#if defined(CONFIG_ARM64)
+	arch_trigger_all_cpu_backtrace();
+	#else
+	arch_trigger_all_cpu_backtrace(false);
+	#endif
+
+	return true;
+}
+
+/* generic implementation */
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+				   void (*raise)(cpumask_t *mask));
+bool nmi_cpu_backtrace(struct pt_regs *regs);
+
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	return false;
+}
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	return false;
+}
+#endif
+
+#ifdef CONFIG_LOCKUP_DETECTOR
+int hw_nmi_is_cpu_stuck(struct pt_regs *);
+u64 hw_nmi_get_sample_period(int watchdog_thresh);
+extern int nmi_watchdog_enabled;
+extern int soft_watchdog_enabled;
+extern int watchdog_user_enabled;
+extern int watchdog_thresh;
+extern unsigned long *watchdog_cpumask_bits;
+extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
+struct ctl_table;
+extern int proc_watchdog(struct ctl_table *, int ,
+			 void __user *, size_t *, loff_t *);
+extern int proc_nmi_watchdog(struct ctl_table *, int ,
+			     void __user *, size_t *, loff_t *);
+extern int proc_soft_watchdog(struct ctl_table *, int ,
+			      void __user *, size_t *, loff_t *);
+extern int proc_watchdog_thresh(struct ctl_table *, int ,
+				void __user *, size_t *, loff_t *);
+extern int proc_watchdog_cpumask(struct ctl_table *, int,
+				 void __user *, size_t *, loff_t *);
+extern int lockup_detector_suspend(void);
+extern void lockup_detector_resume(void);
+#else
+static inline int lockup_detector_suspend(void)
+{
+	return 0;
+}
+
+static inline void lockup_detector_resume(void)
+{
+}
+#endif
+
+#ifdef CONFIG_HAVE_ACPI_APEI_NMI
+#include <asm/nmi.h>
+#endif
+
+#endif
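
For context, a minimal usage sketch of the interfaces this header adds (not part of the commit above): the caller, the function name `example_long_poll()`, the ten-second budget, and the "poll hardware" step are hypothetical placeholders. It illustrates the two patterns the header documents: touching the NMI watchdog from an intentionally long busy-wait so the lockup detectors do not fire, and falling back to another mechanism when `trigger_all_cpu_backtrace()` reports that no arch backtrace support is available.

```c
/*
 * Usage sketch only -- not part of include/linux/nmi.h. The function,
 * the 10 second budget and the hardware poll are placeholders.
 */
#include <linux/nmi.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void example_long_poll(void)
{
	unsigned long timeout = jiffies + 10 * HZ;

	while (!time_after(jiffies, timeout)) {
		/* ... poll hardware, possibly with interrupts disabled ... */

		/*
		 * Reset the NMI watchdog timeout so this intentional
		 * busy-wait is not reported as a hard lockup; when no
		 * NMI watchdog is configured this falls back to
		 * touch_softlockup_watchdog().
		 */
		touch_nmi_watchdog();
	}

	/*
	 * If the architecture provides arch_trigger_all_cpu_backtrace(),
	 * dump every CPU's stack; otherwise fall back to dumping only
	 * the current CPU, as the header's comment suggests.
	 */
	if (!trigger_all_cpu_backtrace())
		dump_stack();
}
```

The `dump_stack()` fallback is one example of the "some other mechanism" the header's comment refers to when the arch provides no `arch_trigger_all_cpu_backtrace` implementation.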
