From e303a832d93e66e691ae08fd39d9969355da4955 Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Wed, 8 Nov 2017 19:47:36 +0530
Subject: BACKPORT: schedutil: Reset cached freq if it is not in sync with next_freq

'cached_raw_freq' is used to get the next frequency quickly, but it
should always be in sync with sg_policy->next_freq. There are cases
where it is not, and in such cases it should be reset to avoid
switching to incorrect frequencies.

Consider this case for example:

- policy->cur is 1.2 GHz (Max)
- A new request comes in for 780 MHz and we store that in
  cached_raw_freq.
- Based on 780 MHz, we calculate the effective frequency as 800 MHz.
- We then decide not to update the frequency, as
  sugov_up_down_rate_limit() returns true.
- Here cached_raw_freq is 780 MHz and sg_policy->next_freq is 1.2 GHz.
- Now if the utilization doesn't change in the next request, the next
  target frequency will still be 780 MHz. It will match cached_raw_freq,
  so we will directly return 1.2 GHz instead of 800 MHz.

BACKPORT of upstream commit 07458f6a5171 ("cpufreq: schedutil: Reset
cached_raw_freq when not in sync with next_freq"). This also updates
sugov_update_commit() to handle the up/down rate-limit tunables, which
aren't present in mainline.

Change-Id: I70bca2c5dfdb545a0471d1c9e4c5addb30ab5494
Signed-off-by: Viresh Kumar
---
 kernel/sched/cpufreq_schedutil.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d3765f0cb699..6c84b4d28914 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -130,8 +130,11 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 {
 	struct cpufreq_policy *policy = sg_policy->policy;
 
-	if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
+	if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
+		/* Reset cached freq as next_freq isn't changed */
+		sg_policy->cached_raw_freq = 0;
 		return;
+	}
 
 	if (sg_policy->next_freq == next_freq)
 		return;
@@ -317,8 +320,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 		 * Do not reduce the frequency if the CPU has not been idle
 		 * recently, as the reduction is likely to be premature then.
 		 */
-		if (busy && next_f < sg_policy->next_freq)
+		if (busy && next_f < sg_policy->next_freq) {
 			next_f = sg_policy->next_freq;
+
+			/* Reset cached freq as next_freq has changed */
+			sg_policy->cached_raw_freq = 0;
+		}
 	}
 	sugov_update_commit(sg_policy, time, next_f);
 }
--
cgit v1.2.3

From b5797f6112c76298cacc3146cd578c82f167d094 Mon Sep 17 00:00:00 2001
From: Ke Wang
Date: Thu, 9 Nov 2017 11:30:56 +0800
Subject: sched: EAS/WALT: Don't take the running task's util into account twice

For the case of upmigrating a misfit running task, the currently
running task's util has already been counted in cpu_util(). Thus
__cpu_overutilized(), which adds the task's util on top of that, counts
it twice and overestimates the utilization.

Signed-off-by: Ke Wang
---
 kernel/sched/fair.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b5ea66e5551c..d9b3ac867956 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6662,7 +6662,8 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 
 #ifdef CONFIG_SCHED_WALT
-	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
+	    p->state == TASK_WAKING)
 		delta = task_util(p);
 #endif
 
 	/* Not enough spare capacity on previous cpu */
--
cgit v1.2.3
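To see why the double count matters, here is a minimal sketch of the
over-utilization check this patch feeds into, based on the EAS/WALT
kernels of this era (capacity_of(), cpu_util() and the capacity_margin
tunable are taken from that code; treat this as an approximation, not
the exact source of this tree):

/*
 * Sketch: a CPU is over-utilized when its current utilization plus
 * some extra 'delta', scaled by a safety margin, exceeds its capacity.
 */
static bool __cpu_overutilized(int cpu, int delta)
{
	return (capacity_of(cpu) * 1024) <
	       ((cpu_util(cpu) + delta) * capacity_margin);
}

A task that is already running on @cpu is part of cpu_util(cpu), so
passing task_util(p) as delta counts it a second time. Only a waking
task (p->state == TASK_WAKING) is not yet reflected in cpu_util(),
which is exactly the condition the patch adds before setting delta.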
From 5249dcc4ebe88377f49a235a9aa977ddd964e41a Mon Sep 17 00:00:00 2001
From: Alexander Potapenko
Date: Fri, 25 Mar 2016 14:22:05 -0700
Subject: UPSTREAM: arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections

KASAN needs to know whether the allocation happens in an IRQ handler.
This lets us strip everything below the IRQ entry point to reduce the
number of unique stack traces that need to be stored.

Move the definition of __irq_entry to <linux/interrupt.h> so that the
users don't need to pull in <linux/ftrace.h>.

Also introduce the __softirq_entry macro, which is similar to
__irq_entry but puts the corresponding functions into the
.softirqentry.text section.

Signed-off-by: Alexander Potapenko
Acked-by: Steven Rostedt
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Cc: Andrey Konovalov
Cc: Dmitry Vyukov
Cc: Andrey Ryabinin
Cc: Konstantin Serebryany
Cc: Dmitry Chernenkov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds

Bug: 64145065
(cherry-picked from be7635e7287e0e8013af3c89a6354a9e0182594c)
Change-Id: Ib321eb9c2b76ef4785cf3fd522169f524348bd9a
Signed-off-by: Paul Lawrence
---
 kernel/softirq.c                     | 2 +-
 kernel/trace/trace_functions_graph.c | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 479e4436f787..359be4f39986 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -227,7 +227,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage __visible void __do_softirq(void)
+asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 	unsigned long old_flags = current->flags;

diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 96c75b0e9831..a804ee1b3ec6 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -8,6 +8,7 @@
  */
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
 
--
cgit v1.2.3
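For reference, the section annotations this commit introduces are plain
attribute macros; after the change they live in <linux/interrupt.h> and
look roughly like this (a sketch of the upstream definitions, not
copied verbatim from this tree):

/*
 * We want to know which function is an entrypoint of a hardirq or
 * softirq.
 */
#define __irq_entry	__attribute__((__section__(".irqentry.text")))
#define __softirq_entry	__attribute__((__section__(".softirqentry.text")))

With __do_softirq() placed in .softirqentry.text, KASAN can test
whether a frame's return address falls inside that section and drop
every frame below the softirq entry point, collapsing many
otherwise-distinct stack traces into one.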
From d6ff4cce9aa3a8b2b6076785723ac387cfe0454c Mon Sep 17 00:00:00 2001
From: Daniel Thompson
Date: Mon, 2 Mar 2015 14:13:36 +0000
Subject: kdb: Fix handling of kallsyms_symbol_next() return value

commit c07d35338081d107e57cf37572d8cc931a8e32e2 upstream.

kallsyms_symbol_next() returns a boolean (true on success). Currently
kdb_read() tests the return value with an inequality that
unconditionally evaluates to false.

This is fixed in the obvious way and, since the conditional branch is
supposed to be unreachable, we also add a WARN_ON().

Reported-by: Dan Carpenter
Signed-off-by: Daniel Thompson
Signed-off-by: Jason Wessel
Signed-off-by: Greg Kroah-Hartman
---
 kernel/debug/kdb/kdb_io.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index fc1ef736253c..77777d918676 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -349,7 +349,7 @@ poll_again:
 	}
 	kdb_printf("\n");
 	for (i = 0; i < count; i++) {
-		if (kallsyms_symbol_next(p_tmp, i) < 0)
+		if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
 			break;
 		kdb_printf("%s ", p_tmp);
 		*(p_tmp + len) = '\0';
--
cgit v1.2.3
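The bug class in the kdb patch is worth spelling out: a bool promoted
to int is only ever 0 or 1, so a "< 0" test on it can never fire and
the error branch is silently dead. A standalone illustration
(hypothetical stand-in names, ordinary userspace C):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for kallsyms_symbol_next(): returns false on failure. */
static bool symbol_next(void)
{
	return false;
}

int main(void)
{
	if (symbol_next() < 0)	/* bool is 0 or 1: never negative, dead branch */
		puts("never reached");
	if (!symbol_next())	/* the correct failure test */
		puts("failure detected");
	return 0;
}

Because the branch could never be taken, the symbol-completion loop ran
to its end even when the lookup failed; the fix both makes the test
meaningful and wraps it in WARN_ON() so an unexpected failure is loud.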
From d9d47a6d6862df5536837b43407d3b527cad5090 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Mon, 6 Mar 2017 15:33:42 -0500
Subject: workqueue: trigger WARN if queue_delayed_work() is called with NULL @wq

[ Upstream commit 637fdbae60d6cb9f6e963c1079d7e0445c86ff7d ]

If queue_delayed_work() gets called with a NULL @wq, the kernel will
oops asynchronously on timer expiration, which isn't too helpful in
tracking down the offender. This actually happened with smc.

__queue_delayed_work() already does several input sanity checks
synchronously. Add a NULL @wq check there as well.

Reported-by: Dave Jones
Link: http://lkml.kernel.org/r/20170227171439.jshx3qplflyrgcv7@codemonkey.org.uk
Signed-off-by: Tejun Heo
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 kernel/workqueue.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 95cc76785a12..85555eb4d3cb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1479,6 +1479,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
+	WARN_ON_ONCE(!wq);
 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 		     timer->data != (unsigned long)dwork);
 	WARN_ON_ONCE(timer_pending(timer));
--
cgit v1.2.3

From 5c15c5c8ebc5b17e5cbd2fc77c2764d2249cc6f1 Mon Sep 17 00:00:00 2001
From: Jason Baron
Date: Mon, 13 Nov 2017 16:48:47 -0500
Subject: jump_label: Invoke jump_label_test() via early_initcall()

[ Upstream commit 92ee46efeb505ead3ab06d3c5ce695637ed5f152 ]

Fengguang Wu reported that running the rcuperf test during boot can
cause jump_label_test() to hit a WARN_ON(). The issue is that the core
jump label code relies on kernel_text_address() to detect when it can
no longer update branches that may be contained in __init sections.
kernel_text_address() in turn assumes that if the system_state variable
is greater than or equal to SYSTEM_RUNNING then __init sections are no
longer valid (since the assumption is that they have been freed).
However, when rcuperf is set up to run in early boot it can call
kernel_power_off(), which sets the system_state to SYSTEM_POWER_OFF.

Since rcuperf initialization is invoked via a module_init(), we can
make explicit the dependency that jump_label_test() must complete
before rcuperf by calling it via early_initcall().

Reported-by: Fengguang Wu
Signed-off-by: Jason Baron
Acked-by: Paul E. McKenney
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Steven Rostedt
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1510609727-2238-1-git-send-email-jbaron@akamai.com
Signed-off-by: Ingo Molnar
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 kernel/jump_label.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 453ec4232852..e863b2339174 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -553,7 +553,7 @@ static __init int jump_label_test(void)
 	return 0;
 }
 
-late_initcall(jump_label_test);
+early_initcall(jump_label_test);
 #endif /* STATIC_KEYS_SELFTEST */
 
 #endif /* HAVE_JUMP_LABEL */
--
cgit v1.2.3

From b3495712705844b252ad8f81130a75a7ba1a0a7c Mon Sep 17 00:00:00 2001
From: Paul Moore
Date: Fri, 1 Sep 2017 09:44:34 -0400
Subject: audit: ensure that 'audit=1' actually enables audit for PID 1

[ Upstream commit 173743dd99a49c956b124a74c8aacb0384739a4c ]

Prior to this patch we enabled audit in audit_init(), which is too late
for PID 1 as the standard initcalls are run after the PID 1 task is
forked. This means that we never allocate an audit_context (see
audit_alloc()) for PID 1 and therefore miss a lot of audit events
generated by PID 1.

This patch enables audit as early as possible to help ensure that when
PID 1 is forked it can allocate an audit_context if required.

Reviewed-by: Richard Guy Briggs
Signed-off-by: Paul Moore
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 kernel/audit.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'kernel')

diff --git a/kernel/audit.c b/kernel/audit.c
index 5ffcbd354a52..41f9a38bb800 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -80,13 +80,13 @@ static int audit_initialized;
 #define AUDIT_OFF	0
 #define AUDIT_ON	1
 #define AUDIT_LOCKED	2
-u32		audit_enabled;
-u32		audit_ever_enabled;
+u32		audit_enabled = AUDIT_OFF;
+u32		audit_ever_enabled = !!AUDIT_OFF;
 
 EXPORT_SYMBOL_GPL(audit_enabled);
 
 /* Default state when kernel boots without any parameters. */
-static u32	audit_default;
+static u32	audit_default = AUDIT_OFF;
 
 /* If auditing cannot proceed, audit_failure selects what happens. */
 static u32	audit_failure = AUDIT_FAIL_PRINTK;
@@ -1179,8 +1179,6 @@ static int __init audit_init(void)
 	skb_queue_head_init(&audit_skb_queue);
 	skb_queue_head_init(&audit_skb_hold_queue);
 	audit_initialized = AUDIT_INITIALIZED;
-	audit_enabled = audit_default;
-	audit_ever_enabled |= !!audit_default;
 
 	audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
 
@@ -1197,6 +1195,8 @@ static int __init audit_enable(char *str)
 	audit_default = !!simple_strtol(str, NULL, 0);
 	if (!audit_default)
 		audit_initialized = AUDIT_DISABLED;
+	audit_enabled = audit_default;
+	audit_ever_enabled = !!audit_enabled;
 
 	pr_info("%s\n", audit_default ?
 		"enabled (after initialization)" : "disabled (until reboot)");
--
cgit v1.2.3
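The ordering issue the audit patch fixes is easy to reproduce in
miniature. A userspace sketch (illustrative names only, not kernel
APIs): a flag that is only set in a late init step is invisible to
anything created before that step, which is exactly what happened to
PID 1's audit_context:

#include <stdbool.h>
#include <stdio.h>

static bool audit_enabled;	/* stands in for the kernel's audit_enabled */
static bool pid1_has_context;	/* stands in for PID 1's audit_context */

/* audit_alloc() only attaches a context if auditing is on at fork time. */
static void fork_pid1(void)
{
	pid1_has_context = audit_enabled;
}

int main(void)
{
	/* Old order: PID 1 is forked first, audit_init() enables audit later. */
	fork_pid1();
	audit_enabled = true;
	printf("old order, PID 1 audited: %s\n", pid1_has_context ? "yes" : "no");

	/* New order: the 'audit=1' boot-parameter handler runs before the fork. */
	audit_enabled = false;
	pid1_has_context = false;
	audit_enabled = true;
	fork_pid1();
	printf("new order, PID 1 audited: %s\n", pid1_has_context ? "yes" : "no");
	return 0;
}

This mirrors the patch: audit_enable() runs from early boot-parameter
parsing, before the PID 1 task is forked, while audit_init() is an
ordinary initcall that runs afterwards.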