diff options
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/resource.c | 3 |
| -rw-r--r-- | kernel/sched/core.c | 38 |
| -rw-r--r-- | kernel/sysctl.c | 16 |
| -rw-r--r-- | kernel/time/hrtimer.c | 1 |
| -rw-r--r-- | kernel/trace/trace_irqsoff.c | 42 |
5 files changed, 98 insertions(+), 2 deletions(-)
diff --git a/kernel/resource.c b/kernel/resource.c index c09d484f7b5f..73348f574163 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -611,7 +611,8 @@ static int __find_resource(struct resource *root, struct resource *old, alloc.start = constraint->alignf(constraint->alignf_data, &avail, size, constraint->align); alloc.end = alloc.start + size - 1; - if (resource_contains(&avail, &alloc)) { + if (alloc.start <= alloc.end && + resource_contains(&avail, &alloc)) { new->start = alloc.start; new->end = alloc.end; return 0; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fffc50b0191f..c1ecb07de762 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3268,9 +3268,24 @@ notrace unsigned long get_parent_ip(unsigned long addr) #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ defined(CONFIG_PREEMPT_TRACER)) +/* + * preemptoff stack tracing threshold in ns. + * default: 1ms + */ +unsigned int sysctl_preemptoff_tracing_threshold_ns = 1000000UL; + +struct preempt_store { + u64 ts; + unsigned long caddr[4]; + bool irqs_disabled; +}; + +static DEFINE_PER_CPU(struct preempt_store, the_ps); void preempt_count_add(int val) { + struct preempt_store *ps = &per_cpu(the_ps, raw_smp_processor_id()); + #ifdef CONFIG_DEBUG_PREEMPT /* * Underflow? @@ -3291,6 +3306,13 @@ void preempt_count_add(int val) #ifdef CONFIG_DEBUG_PREEMPT current->preempt_disable_ip = ip; #endif + ps->ts = sched_clock(); + ps->caddr[0] = CALLER_ADDR0; + ps->caddr[1] = CALLER_ADDR1; + ps->caddr[2] = CALLER_ADDR2; + ps->caddr[3] = CALLER_ADDR3; + ps->irqs_disabled = irqs_disabled(); + trace_preempt_off(CALLER_ADDR0, ip); } } @@ -3313,8 +3335,22 @@ void preempt_count_sub(int val) return; #endif - if (preempt_count() == val) + if (preempt_count() == val) { + struct preempt_store *ps = &per_cpu(the_ps, + raw_smp_processor_id()); + u64 delta = sched_clock() - ps->ts; + + /* + * Trace preempt disable stack if preemption + * is disabled for more than the threshold. 
+ */ + if (delta > sysctl_preemptoff_tracing_threshold_ns) + trace_sched_preempt_disable(delta, ps->irqs_disabled, + ps->caddr[0], ps->caddr[1], + ps->caddr[2], ps->caddr[3]); + trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); + } __preempt_count_sub(val); } EXPORT_SYMBOL(preempt_count_sub); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index bc4ca30ddc21..14f19af9d79a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -291,6 +291,22 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, +#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER) + { + .procname = "preemptoff_tracing_threshold_ns", + .data = &sysctl_preemptoff_tracing_threshold_ns, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "irqsoff_tracing_threshold_ns", + .data = &sysctl_irqsoff_tracing_threshold_ns, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, +#endif #ifdef CONFIG_SCHED_HMP { .procname = "sched_freq_reporting_policy", diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index beafdf94b3b5..79fadcad21ff 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -435,6 +435,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer) { debug_object_free(timer, &hrtimer_debug_descr); } +EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack); #else static inline void debug_hrtimer_init(struct hrtimer *timer) { } diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 21b162c07e83..c00137ea939e 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -13,6 +13,7 @@ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> +#include <linux/sched/sysctl.h> #include "trace.h" @@ -39,6 +40,12 @@ static int save_flags; static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph); +/* + * irqsoff stack 
tracing threshold in ns. + * default: 1ms + */ +unsigned int sysctl_irqsoff_tracing_threshold_ns = 1000000UL; + #ifdef CONFIG_PREEMPT_TRACER static inline int preempt_trace(void) @@ -454,17 +461,52 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1) #else /* !CONFIG_PROVE_LOCKING */ +#ifdef CONFIG_PREEMPTIRQ_EVENTS +struct irqsoff_store { + u64 ts; + unsigned long caddr[4]; +}; + +static DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff); +#endif /* CONFIG_PREEMPTIRQ_EVENTS */ + /* * We are only interested in hardirq on/off events: */ static inline void tracer_hardirqs_on(void) { +#ifdef CONFIG_PREEMPTIRQ_EVENTS + struct irqsoff_store *is = &per_cpu(the_irqsoff, + raw_smp_processor_id()); + + if (!is->ts) { + is->ts = sched_clock(); + is->caddr[0] = CALLER_ADDR0; + is->caddr[1] = CALLER_ADDR1; + is->caddr[2] = CALLER_ADDR2; + is->caddr[3] = CALLER_ADDR3; + } +#endif /* CONFIG_PREEMPTIRQ_EVENTS */ if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } static inline void tracer_hardirqs_off(void) { +#ifdef CONFIG_PREEMPTIRQ_EVENTS + struct irqsoff_store *is = &per_cpu(the_irqsoff, + raw_smp_processor_id()); + u64 delta = 0; + + if (is->ts) { + delta = sched_clock() - is->ts; + is->ts = 0; + } + if (delta > sysctl_irqsoff_tracing_threshold_ns) + trace_irqs_disable(delta, is->caddr[0], is->caddr[1], + is->caddr[2], is->caddr[3]); +#endif /* CONFIG_PREEMPTIRQ_EVENTS */ + if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } |
