Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c              40
-rw-r--r--  kernel/power/hibernate.c  17
-rw-r--r--  kernel/sched/core.c       21
-rw-r--r--  kernel/sched/core_ctl.c   60
-rw-r--r--  kernel/trace/trace.c       2
-rw-r--r--  kernel/watchdog.c          2
6 files changed, 112 insertions(+), 30 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8b6940755e4a..e822cb0e18d5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -530,9 +530,41 @@ out:
return ret;
}
+static int switch_to_rt_policy(void)
+{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ unsigned int policy = current->policy;
+ int err;
+
+ /* Nobody should be attempting hotplug from these policy contexts. */
+ if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
+ policy == SCHED_DEADLINE)
+ return -EPERM;
+
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
+ return 1;
+
+ /* Only SCHED_NORMAL left. */
+ err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+ return err;
+}
+
+static int switch_to_fair_policy(void)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+}
+
int cpu_up(unsigned int cpu)
{
int err = 0;
+ int switch_err = 0;
+
+ switch_err = switch_to_rt_policy();
+ if (switch_err < 0)
+ return switch_err;
if (!cpu_possible(cpu)) {
pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
@@ -558,6 +590,14 @@ int cpu_up(unsigned int cpu)
out:
cpu_maps_update_done();
+
+ if (!switch_err) {
+ switch_err = switch_to_fair_policy();
+ if (switch_err)
+ pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
+ switch_err, current->comm, current->pid);
+ }
+
return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
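
Note on the kernel/cpu.c hunks: the two new helpers bracket cpu_up() so that a SCHED_NORMAL caller is temporarily promoted to SCHED_FIFO for the duration of the hotplug operation and demoted again afterwards; callers that are already RT (return value 1) are left untouched, and disallowed policies fail with -EPERM. A minimal sketch of the same bracketing pattern, where some_hotplug_op() and do_the_hotplug_work() are hypothetical placeholders and not part of this patch:

/*
 * Sketch only: how a hotplug entry point is expected to use the helpers.
 * switch_to_rt_policy() returns <0 on error, 0 if the caller was promoted
 * to SCHED_FIFO, and 1 if it was already RT (so no restore is needed).
 */
int some_hotplug_op(unsigned int cpu)
{
        int switch_err, err;

        switch_err = switch_to_rt_policy();
        if (switch_err < 0)
                return switch_err;

        err = do_the_hotplug_work(cpu);         /* placeholder for the real body */

        if (!switch_err)
                switch_to_fair_policy();        /* restore SCHED_NORMAL */

        return err;
}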
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 3124cebaec31..2fc1177383a0 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -1159,6 +1159,22 @@ static int __init kaslr_nohibernate_setup(char *str)
return nohibernate_setup(str);
}
+static int __init page_poison_nohibernate_setup(char *str)
+{
+#ifdef CONFIG_PAGE_POISONING_ZERO
+ /*
+ * The zeroing option for page poison skips the checks on alloc;
+ * since hibernation doesn't save free pages, there's no way to
+ * guarantee the pages will still be zeroed.
+ */
+ if (!strcmp(str, "on")) {
+ pr_info("Disabling hibernation due to page poisoning\n");
+ return nohibernate_setup(str);
+ }
+#endif
+ return 1;
+}
+
__setup("noresume", noresume_setup);
__setup("resume_offset=", resume_offset_setup);
__setup("resume=", resume_setup);
@@ -1167,3 +1183,4 @@ __setup("resumewait", resumewait_setup);
__setup("resumedelay=", resumedelay_setup);
__setup("nohibernate", nohibernate_setup);
__setup("kaslr", kaslr_nohibernate_setup);
+__setup("page_poison=", page_poison_nohibernate_setup);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1017a3f77391..e107c4d6b385 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3364,6 +3364,9 @@ NOKPROBE_SYMBOL(preempt_count_sub);
*/
static noinline void __schedule_bug(struct task_struct *prev)
{
+ /* Save this before calling printk(), since that will clobber it */
+ unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
+
if (oops_in_progress)
return;
@@ -3374,13 +3377,12 @@ static noinline void __schedule_bug(struct task_struct *prev)
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
-#ifdef CONFIG_DEBUG_PREEMPT
- if (in_atomic_preempt_off()) {
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
+ && in_atomic_preempt_off()) {
pr_err("Preemption disabled at:");
- print_ip_sym(current->preempt_disable_ip);
+ print_ip_sym(preempt_disable_ip);
pr_cont("\n");
}
-#endif
#ifdef CONFIG_PANIC_ON_SCHED_BUG
BUG();
#endif
@@ -8513,6 +8515,7 @@ EXPORT_SYMBOL(__might_sleep);
void ___might_sleep(const char *file, int line, int preempt_offset)
{
static unsigned long prev_jiffy; /* ratelimiting */
+ unsigned long preempt_disable_ip;
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
@@ -8525,6 +8528,9 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
return;
prev_jiffy = jiffies;
+ /* Save this before calling printk(), since that will clobber it */
+ preempt_disable_ip = get_preempt_disable_ip(current);
+
printk(KERN_ERR
"BUG: sleeping function called from invalid context at %s:%d\n",
file, line);
@@ -8539,13 +8545,12 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
-#ifdef CONFIG_DEBUG_PREEMPT
- if (!preempt_count_equals(preempt_offset)) {
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
+ && !preempt_count_equals(preempt_offset)) {
pr_err("Preemption disabled at:");
- print_ip_sym(current->preempt_disable_ip);
+ print_ip_sym(preempt_disable_ip);
pr_cont("\n");
}
-#endif
#ifdef CONFIG_PANIC_ON_SCHED_BUG
BUG();
#endif
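
Note on the kernel/sched/core.c hunks: both sites read the saved preempt-disable IP into a local variable before calling printk(), which can itself disable and re-enable preemption and so overwrite current->preempt_disable_ip, and they replace the #ifdef blocks with IS_ENABLED() so the code is always compiled. The get_preempt_disable_ip() helper is not shown in this diff; presumably it is shaped roughly like the sketch below, returning 0 when CONFIG_DEBUG_PREEMPT is off so the IS_ENABLED() form remains valid:

/*
 * Presumed accessor (an assumption, not part of this diff): a plain field
 * read with CONFIG_DEBUG_PREEMPT, and a constant 0 without it.
 */
static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
{
#ifdef CONFIG_DEBUG_PREEMPT
        return p->preempt_disable_ip;
#else
        return 0;
#endif
}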
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 1dde338d30f2..0b5f2dea18a1 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -35,6 +35,7 @@ struct cluster_data {
unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
unsigned int active_cpus;
unsigned int num_cpus;
+ unsigned int nr_isolated_cpus;
cpumask_t cpu_mask;
unsigned int need_cpus;
unsigned int task_thres;
@@ -260,6 +261,7 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
ssize_t count = 0;
unsigned int cpu;
+ spin_lock_irq(&state_lock);
for_each_possible_cpu(cpu) {
c = &per_cpu(cpu_state, cpu);
cluster = c->cluster;
@@ -293,8 +295,12 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
count += snprintf(buf + count, PAGE_SIZE - count,
"\tNeed CPUs: %u\n", cluster->need_cpus);
count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNr isolated CPUs: %u\n",
+ cluster->nr_isolated_cpus);
+ count += snprintf(buf + count, PAGE_SIZE - count,
"\tBoost: %u\n", (unsigned int) cluster->boost);
}
+ spin_unlock_irq(&state_lock);
return count;
}
@@ -525,7 +531,7 @@ static bool adjustment_possible(const struct cluster_data *cluster,
unsigned int need)
{
return (need < cluster->active_cpus || (need > cluster->active_cpus &&
- sched_isolate_count(&cluster->cpu_mask, false)));
+ cluster->nr_isolated_cpus));
}
static bool eval_need(struct cluster_data *cluster)
@@ -535,9 +541,8 @@ static bool eval_need(struct cluster_data *cluster)
unsigned int need_cpus = 0, last_need, thres_idx;
int ret = 0;
bool need_flag = false;
- unsigned int active_cpus;
unsigned int new_need;
- s64 now;
+ s64 now, elapsed;
if (unlikely(!cluster->inited))
return 0;
@@ -547,8 +552,8 @@ static bool eval_need(struct cluster_data *cluster)
if (cluster->boost) {
need_cpus = cluster->max_cpus;
} else {
- active_cpus = get_active_cpu_count(cluster);
- thres_idx = active_cpus ? active_cpus - 1 : 0;
+ cluster->active_cpus = get_active_cpu_count(cluster);
+ thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
list_for_each_entry(c, &cluster->lru, sib) {
if (c->busy >= cluster->busy_up_thres[thres_idx])
c->is_busy = true;
@@ -564,17 +569,16 @@ static bool eval_need(struct cluster_data *cluster)
last_need = cluster->need_cpus;
now = ktime_to_ms(ktime_get());
- if (new_need == last_need) {
- cluster->need_ts = now;
- spin_unlock_irqrestore(&state_lock, flags);
- return 0;
- }
-
- if (need_cpus > cluster->active_cpus) {
+ if (new_need > cluster->active_cpus) {
ret = 1;
- } else if (need_cpus < cluster->active_cpus) {
- s64 elapsed = now - cluster->need_ts;
+ } else {
+ if (new_need == last_need) {
+ cluster->need_ts = now;
+ spin_unlock_irqrestore(&state_lock, flags);
+ return 0;
+ }
+ elapsed = now - cluster->need_ts;
ret = elapsed >= cluster->offline_delay_ms;
}
@@ -582,7 +586,7 @@ static bool eval_need(struct cluster_data *cluster)
cluster->need_ts = now;
cluster->need_cpus = new_need;
}
- trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
+ trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
ret && need_flag);
spin_unlock_irqrestore(&state_lock, flags);
@@ -718,6 +722,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
struct cpu_data *c, *tmp;
unsigned long flags;
unsigned int num_cpus = cluster->num_cpus;
+ unsigned int nr_isolated = 0;
/*
* Protect against entry being removed (and added at tail) by other
@@ -742,12 +747,14 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
move_cpu_lru(c);
+ nr_isolated++;
} else {
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
/*
@@ -757,6 +764,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (cluster->active_cpus <= cluster->max_cpus)
return;
+ nr_isolated = 0;
num_cpus = cluster->num_cpus;
spin_lock_irqsave(&state_lock, flags);
list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
@@ -774,12 +782,14 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
move_cpu_lru(c);
+ nr_isolated++;
} else {
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
}
@@ -790,6 +800,7 @@ static void __try_to_unisolate(struct cluster_data *cluster,
struct cpu_data *c, *tmp;
unsigned long flags;
unsigned int num_cpus = cluster->num_cpus;
+ unsigned int nr_unisolated = 0;
/*
* Protect against entry being removed (and added at tail) by other
@@ -814,12 +825,14 @@ static void __try_to_unisolate(struct cluster_data *cluster,
if (!sched_unisolate_cpu(c->cpu)) {
c->isolated_by_us = false;
move_cpu_lru(c);
+ nr_unisolated++;
} else {
pr_debug("Unable to unisolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus -= nr_unisolated;
spin_unlock_irqrestore(&state_lock, flags);
}
@@ -885,10 +898,11 @@ static int __ref cpu_callback(struct notifier_block *nfb,
struct cpu_data *state = &per_cpu(cpu_state, cpu);
struct cluster_data *cluster = state->cluster;
unsigned int need;
- int ret = NOTIFY_OK;
+ bool do_wakeup, unisolated = false;
+ unsigned long flags;
if (unlikely(!cluster || !cluster->inited))
- return NOTIFY_OK;
+ return NOTIFY_DONE;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
@@ -911,6 +925,7 @@ static int __ref cpu_callback(struct notifier_block *nfb,
if (state->isolated_by_us) {
sched_unisolate_cpu_unlocked(cpu);
state->isolated_by_us = false;
+ unisolated = true;
}
/* Move a CPU to the end of the LRU when it goes offline. */
@@ -919,13 +934,20 @@ static int __ref cpu_callback(struct notifier_block *nfb,
state->busy = 0;
cluster->active_cpus = get_active_cpu_count(cluster);
break;
+ default:
+ return NOTIFY_DONE;
}
need = apply_limits(cluster, cluster->need_cpus);
- if (adjustment_possible(cluster, need))
+ spin_lock_irqsave(&state_lock, flags);
+ if (unisolated)
+ cluster->nr_isolated_cpus--;
+ do_wakeup = adjustment_possible(cluster, need);
+ spin_unlock_irqrestore(&state_lock, flags);
+ if (do_wakeup)
wake_up_core_ctl_thread(cluster);
- return ret;
+ return NOTIFY_OK;
}
static struct notifier_block __refdata cpu_notifier = {
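
Note on the kernel/sched/core_ctl.c hunks: they add a per-cluster nr_isolated_cpus counter so adjustment_possible() and the sysfs dump no longer need sched_isolate_count(); the counter is read and written only under state_lock. Because sched_isolate_cpu()/sched_unisolate_cpu() run with the lock dropped, the loops count successes in a local variable and fold them into the shared counter once, back under the lock. The sketch below (isolate_batch_example() is a hypothetical name) illustrates only that batching pattern, not the real CPU-selection logic:

/*
 * Illustration only: count successful isolations locally while the
 * expensive per-CPU call runs with state_lock dropped, then update the
 * shared counter once under the lock.
 */
static void isolate_batch_example(struct cluster_data *cluster)
{
        unsigned int nr_done = 0;
        unsigned long flags;
        int cpu;

        for_each_cpu(cpu, &cluster->cpu_mask) {
                if (!sched_isolate_cpu(cpu))    /* 0 means the CPU was isolated */
                        nr_done++;
        }

        spin_lock_irqsave(&state_lock, flags);
        cluster->nr_isolated_cpus += nr_done;   /* single update under the lock */
        spin_unlock_irqrestore(&state_lock, flags);
}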
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 66d9e907aa07..c0c10a335b3b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1634,7 +1634,7 @@ static void __trace_find_cmdline(int pid, char comm[])
map = savedcmd->map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
- strcpy(comm, get_saved_cmdlines(map));
+ strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN - 1);
else
strcpy(comm, "<...>");
}
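
Note on the kernel/trace/trace.c hunk: the one-liner in __trace_find_cmdline() swaps strcpy() for strlcpy() so a saved cmdline longer than the comm[] buffer is truncated instead of overrunning it. A minimal sketch of the behaviour relied on, with find_cmdline_example() as a hypothetical name:

/*
 * strlcpy() copies at most size - 1 bytes and always NUL-terminates,
 * so an over-long source string is cut short rather than overflowing
 * the destination buffer.
 */
static void find_cmdline_example(char comm[TASK_COMM_LEN])
{
        const char *saved = "a-saved-cmdline-longer-than-TASK_COMM_LEN";

        strlcpy(comm, saved, TASK_COMM_LEN);    /* truncates, never overruns */
}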
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1de2ef8ec926..f527d8e65d69 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -123,9 +123,7 @@ static unsigned long soft_lockup_nmi_warn;
#ifdef CONFIG_HARDLOCKUP_DETECTOR
unsigned int __read_mostly hardlockup_panic =
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
static unsigned long __maybe_unused hardlockup_allcpu_dumped;
-#endif
/*
* We may not want to enable hard lockup detection by default in all cases,
* for example when running the kernel as a guest on a hypervisor. In these
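
Note on the kernel/watchdog.c hunk: the inner #ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI guard is dropped because hardlockup_allcpu_dumped is already annotated __maybe_unused, so the definition no longer has to be compiled out just to silence an unused-variable warning when its only user is behind that option. A minimal illustration of the annotation, with example_dump_mask and CONFIG_EXAMPLE_NMI_DUMP as hypothetical names:

/*
 * Illustration only: __maybe_unused keeps the definition warning-free
 * even in configs where every reader of the variable is compiled out.
 */
static unsigned long __maybe_unused example_dump_mask;

#ifdef CONFIG_EXAMPLE_NMI_DUMP                  /* hypothetical option */
static void example_dump_once(void)
{
        /* set bit 0 exactly once; later calls see it already set */
        if (!test_and_set_bit(0, &example_dump_mask))
                pr_info("dumping state once\n");
}
#endif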