Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c                        | 10
-rw-r--r--  kernel/debug/kdb/kdb_io.c             |  2
-rw-r--r--  kernel/jump_label.c                   |  2
-rw-r--r--  kernel/sched/cpufreq_schedutil.c      | 11
-rw-r--r--  kernel/sched/fair.c                   |  3
-rw-r--r--  kernel/softirq.c                      |  2
-rw-r--r--  kernel/trace/trace_functions_graph.c  |  1
-rw-r--r--  kernel/workqueue.c                    |  1
8 files changed, 21 insertions(+), 11 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 34f690b9213a..e228b88dfd23 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -80,13 +80,13 @@ static int audit_initialized;
#define AUDIT_OFF 0
#define AUDIT_ON 1
#define AUDIT_LOCKED 2
-u32 audit_enabled;
-u32 audit_ever_enabled;
+u32 audit_enabled = AUDIT_OFF;
+u32 audit_ever_enabled = !!AUDIT_OFF;
EXPORT_SYMBOL_GPL(audit_enabled);
/* Default state when kernel boots without any parameters. */
-static u32 audit_default;
+static u32 audit_default = AUDIT_OFF;
/* If auditing cannot proceed, audit_failure selects what happens. */
static u32 audit_failure = AUDIT_FAIL_PRINTK;
@@ -1185,8 +1185,6 @@ static int __init audit_init(void)
skb_queue_head_init(&audit_skb_queue);
skb_queue_head_init(&audit_skb_hold_queue);
audit_initialized = AUDIT_INITIALIZED;
- audit_enabled = audit_default;
- audit_ever_enabled |= !!audit_default;
audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
@@ -1203,6 +1201,8 @@ static int __init audit_enable(char *str)
audit_default = !!simple_strtol(str, NULL, 0);
if (!audit_default)
audit_initialized = AUDIT_DISABLED;
+ audit_enabled = audit_default;
+ audit_ever_enabled = !!audit_enabled;
pr_info("%s\n", audit_default ?
"enabled (after initialization)" : "disabled (until reboot)");
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 0b891286a150..3990c1f73e45 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -357,7 +357,7 @@ poll_again:
}
kdb_printf("\n");
for (i = 0; i < count; i++) {
- if (kallsyms_symbol_next(p_tmp, i) < 0)
+ if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
break;
kdb_printf("%s ", p_tmp);
*(p_tmp + len) = '\0';
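
The kdb_io.c fix matters because kallsyms_symbol_next() reports success as a non-zero value rather than returning a negative errno, so the old "< 0" test could never detect failure. Wrapping the truthiness test in WARN_ON() both terminates the completion loop and makes the failure loud. A userspace sketch of the pattern; the lookup function and symbol list are invented for illustration, and the macro is a simplified model of the kernel's WARN_ON():

#include <stdio.h>

/* Simplified WARN_ON(): evaluates the condition, logs when it is true,
 * and returns it so it can drive control flow directly.
 */
#define WARN_ON(cond) ({                                        \
        int __c = !!(cond);                                     \
        if (__c)                                                \
                fprintf(stderr, "WARNING: %s\n", #cond);        \
        __c;                                                    \
})

/* Hypothetical lookup that reports success as non-zero, not as a
 * negative errno, matching the fixed call site above.
 */
static int symbol_next(char *buf, size_t len, int i)
{
        static const char *syms[] = { "kmalloc", "kmem_cache_alloc", "kfree" };

        if (i >= 3)
                return 0;       /* no more matches: falsy, never negative */
        snprintf(buf, len, "%s", syms[i]);
        return 1;
}

int main(void)
{
        char buf[64];

        for (int i = 0; i < 10; i++) {
                if (WARN_ON(!symbol_next(buf, sizeof(buf), i)))
                        break;          /* stop on failure instead of looping on */
                printf("%s ", buf);
        }
        printf("\n");
        return 0;
}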
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 453ec4232852..e863b2339174 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -553,7 +553,7 @@ static __init int jump_label_test(void)
return 0;
}
-late_initcall(jump_label_test);
+early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */
#endif /* HAVE_JUMP_LABEL */
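
The jump_label.c change only moves the self-test between initcall levels: the kernel runs all early_initcall()s before late_initcall()s, presumably so the static-key patching is exercised before later initcalls come to depend on it. A toy model of that ordering, with invented function names; the kernel's real mechanism uses linker sections rather than explicit arrays:

#include <stdio.h>

typedef int (*initcall_t)(void);

static int jump_label_test(void) { puts("jump_label_test"); return 0; }
static int some_driver_init(void) { puts("some_driver_init"); return 0; }

/* After the patch the self-test sits in the early level, so it runs
 * before everything registered at the late level.
 */
static initcall_t early_initcalls[] = { jump_label_test };
static initcall_t late_initcalls[]  = { some_driver_init };

int main(void)
{
        for (unsigned i = 0; i < sizeof(early_initcalls) / sizeof(*early_initcalls); i++)
                early_initcalls[i]();
        for (unsigned i = 0; i < sizeof(late_initcalls) / sizeof(*late_initcalls); i++)
                late_initcalls[i]();
        return 0;
}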
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d3765f0cb699..6c84b4d28914 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -130,8 +130,11 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
{
struct cpufreq_policy *policy = sg_policy->policy;
- if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
+ if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
+ /* Reset cached freq as next_freq isn't changed */
+ sg_policy->cached_raw_freq = 0;
return;
+ }
if (sg_policy->next_freq == next_freq)
return;
@@ -317,8 +320,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
*/
- if (busy && next_f < sg_policy->next_freq)
+ if (busy && next_f < sg_policy->next_freq) {
next_f = sg_policy->next_freq;
+
+ /* Reset cached freq as next_freq has changed */
+ sg_policy->cached_raw_freq = 0;
+ }
}
sugov_update_commit(sg_policy, time, next_f);
}
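
Both schedutil hunks enforce one invariant: cached_raw_freq is only valid while it corresponds to the frequency that was actually committed in next_freq. Whenever the commit path diverges (the request is rate-limited, or next_f is clamped back up to the old next_freq on a busy CPU), the cache is zeroed so a later request mapping to the same raw value is not wrongly short-circuited against a stale next_freq. A minimal userspace sketch of the invalidate-on-divergence pattern; the struct mirrors the kernel's names but the logic is deliberately simplified:

#include <stdio.h>

struct sugov_policy {
        unsigned int next_freq;         /* last frequency committed */
        unsigned int cached_raw_freq;   /* raw input that produced it */
};

static void commit_freq(struct sugov_policy *sg, unsigned int raw_freq,
                        unsigned int next_freq, int rate_limited)
{
        if (rate_limited) {
                /* next_freq stays as-is, so the cache no longer matches it */
                sg->cached_raw_freq = 0;
                return;
        }
        sg->next_freq = next_freq;
        sg->cached_raw_freq = raw_freq;
}

int main(void)
{
        struct sugov_policy sg = { .next_freq = 1000, .cached_raw_freq = 1100 };

        commit_freq(&sg, 1300, 1200, 1);        /* rate limited: cache dropped */
        printf("next=%u cached=%u\n", sg.next_freq, sg.cached_raw_freq);
        commit_freq(&sg, 1300, 1200, 0);        /* applied: cache repopulated */
        printf("next=%u cached=%u\n", sg.next_freq, sg.cached_raw_freq);
        return 0;
}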
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e515311aa93c..5867512e4e8d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7747,7 +7747,8 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
+ p->state == TASK_WAKING)
delta = task_util(p);
#endif
/* Not enough spare capacity on previous cpu */
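
The fair.c hunk narrows when task_util(p) counts as extra demand on a candidate CPU: under WALT it is only a meaningful delta while the task is actually waking (p->state == TASK_WAKING); in other call paths its utilization is already folded into the CPU's accounting and adding it again would double-count. A small sketch of that guard; the struct and the TASK_WAKING value are illustrative stand-ins for the kernel's definitions:

#include <stdio.h>

#define TASK_WAKING 0x0200      /* illustrative value for the wake state */

struct task { unsigned int state; unsigned long util; };

/* Only treat the task's utilization as additional demand on the
 * candidate CPU if it is genuinely waking; otherwise it is already
 * included in the CPU's utilization.
 */
static unsigned long wakeup_delta(const struct task *p, int use_walt)
{
        if (use_walt && p->state == TASK_WAKING)
                return p->util;
        return 0;
}

int main(void)
{
        struct task p = { .state = TASK_WAKING, .util = 300 };

        printf("delta = %lu\n", wakeup_delta(&p, 1));   /* 300 */
        p.state = 0;                                    /* running, not waking */
        printf("delta = %lu\n", wakeup_delta(&p, 1));   /* 0 */
        return 0;
}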
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9029227e5f57..615ba59dbc10 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -236,7 +236,7 @@ static inline void lockdep_softirq_end(bool in_hardirq) { }
#define long_softirq_pending() (local_softirq_pending() & LONG_SOFTIRQ_MASK)
#define defer_for_rt() (long_softirq_pending() && cpupri_check_rt())
-asmlinkage __visible void __do_softirq(void)
+asmlinkage __visible void __softirq_entry __do_softirq(void)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
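
The __softirq_entry annotation places __do_softirq() in the dedicated .softirqentry.text section, so code that only has an instruction address (KASAN, the function-graph tracer) can classify it as softirq-entry code with a simple range check; the trace_functions_graph.c hunk below pulls in <linux/interrupt.h>, where that annotation lives. A standalone sketch of the same trick using ld's automatic __start_/__stop_ symbols; the section and function names are invented for the demo:

#include <stdio.h>

/* ld provides __start_<sec>/__stop_<sec> for any section whose name is
 * a valid C identifier, enabling the same address-range test the
 * kernel performs against .softirqentry.text.
 */
extern char __start_demo_softirq[], __stop_demo_softirq[];

__attribute__((section("demo_softirq"), noinline, used))
static void demo_do_softirq(void)
{
        puts("handling softirqs");
}

static int addr_in_softirq_entry(void *addr)
{
        return (char *)addr >= __start_demo_softirq &&
               (char *)addr <  __stop_demo_softirq;
}

int main(void)
{
        demo_do_softirq();
        printf("handler in section: %d\n",
               addr_in_softirq_entry((void *)demo_do_softirq));   /* 1 */
        printf("main in section: %d\n",
               addr_in_softirq_entry((void *)main));              /* 0 */
        return 0;
}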
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 96c75b0e9831..a804ee1b3ec6 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -8,6 +8,7 @@
*/
#include <linux/uaccess.h>
#include <linux/ftrace.h>
+#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e56ba414839c..a719a4ad2e74 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1493,6 +1493,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
+ WARN_ON_ONCE(!wq);
WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
timer->data != (unsigned long)dwork);
WARN_ON_ONCE(timer_pending(timer));
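
The workqueue.c hunk adds a WARN_ON_ONCE(!wq) so a caller passing a NULL workqueue is flagged before __queue_delayed_work() goes on to dereference it, turning a silent crash path into a one-time diagnostic. A userspace sketch of the warn-once guard; the macro is a simplified model of the kernel's WARN_ON_ONCE(), and the early return is added here for the demo rather than copied from the kernel:

#include <stdio.h>

/* One-shot warning: the static flag is per call site, so repeated
 * hits only log the first time.
 */
#define WARN_ON_ONCE(cond) ({                                   \
        static int __warned;                                    \
        int __c = !!(cond);                                     \
        if (__c && !__warned) {                                 \
                __warned = 1;                                   \
                fprintf(stderr, "WARNING: %s\n", #cond);        \
        }                                                       \
        __c;                                                    \
})

struct workqueue { const char *name; };

static void queue_delayed(struct workqueue *wq)
{
        WARN_ON_ONCE(!wq);      /* complain early, before wq is used */
        if (!wq)
                return;
        printf("queued on %s\n", wq->name);
}

int main(void)
{
        struct workqueue wq = { .name = "events" };

        queue_delayed(NULL);    /* warns once */
        queue_delayed(NULL);    /* silent */
        queue_delayed(&wq);     /* normal path */
        return 0;
}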