Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cpu.c               | 23 ++++++++++++++++++++++-
-rw-r--r--   kernel/irq/manage.c        |  4 +++-
-rw-r--r--   kernel/ptrace.c            | 10 ++++++++++
-rw-r--r--   kernel/sched/core.c        | 24 ++++++++++++++++++++++++
-rw-r--r--   kernel/sched/fair.c        |  4 ++++
-rw-r--r--   kernel/sched/sched.h       |  1 +
-rw-r--r--   kernel/time/timer_stats.c  |  2 +-
-rw-r--r--   kernel/trace/ring_buffer.c |  2 +-
8 files changed, 66 insertions(+), 4 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 24ca2963754b..427ec803d455 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/notifier.h>
 #include <linux/sched.h>
+#include <linux/sched/smt.h>
 #include <linux/unistd.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
@@ -201,6 +202,12 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif /* CONFIG_HOTPLUG_CPU */
 
+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override this.
+ */
+void __weak arch_smt_update(void) { }
+
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
@@ -437,6 +444,7 @@ out_release:
 	trace_sched_cpu_hotplug(cpu, err, 0);
 	if (!err)
 		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+	arch_smt_update();
 	return err;
 }
 
@@ -541,7 +549,7 @@ out_notify:
 out:
 	cpu_hotplug_done();
 	trace_sched_cpu_hotplug(cpu, ret, 1);
-
+	arch_smt_update();
 	return ret;
 }
 
@@ -846,6 +854,19 @@ void init_cpu_online(const struct cpumask *src)
 	cpumask_copy(to_cpumask(cpu_online_bits), src);
 }
 
+enum cpu_mitigations cpu_mitigations = CPU_MITIGATIONS_AUTO;
+
+static int __init mitigations_parse_cmdline(char *arg)
+{
+	if (!strcmp(arg, "off"))
+		cpu_mitigations = CPU_MITIGATIONS_OFF;
+	else if (!strcmp(arg, "auto"))
+		cpu_mitigations = CPU_MITIGATIONS_AUTO;
+
+	return 0;
+}
+early_param("mitigations", mitigations_parse_cmdline);
+
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 83cea913983c..92c7eb1aeded 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -319,8 +319,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
-	if (old_notify)
+	if (old_notify) {
+		cancel_work_sync(&old_notify->work);
 		kref_put(&old_notify->kref, old_notify->release);
+	}
 
 	return 0;
 }
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 5e2cd1030702..8303874c2a06 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -228,6 +228,9 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 
 static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 {
+	if (mode & PTRACE_MODE_SCHED)
+		return false;
+
 	if (mode & PTRACE_MODE_NOAUDIT)
 		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
 	else
@@ -295,9 +298,16 @@ ok:
 	     !ptrace_has_cap(mm->user_ns, mode)))
 	    return -EPERM;
 
+	if (mode & PTRACE_MODE_SCHED)
+		return 0;
 	return security_ptrace_access_check(task, mode);
 }
 
+bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
+{
+	return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
+}
+
 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
 	int err;
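A few notes on the hunks above. The arch_smt_update() stub is declared __weak, which relies on the linker's weak-symbol rule: any architecture that supplies a strong definition of the same symbol replaces the no-op default at link time (x86, for example, re-evaluates its SMT-sensitive speculation mitigations there). A minimal sketch of such an override; the file path and body are illustrative assumptions, not part of this patch:

	/* arch/<arch>/kernel/cpu.c: a strong definition overrides the __weak stub */
	void arch_smt_update(void)
	{
		/* re-check SMT-dependent mitigation state after a sibling came or went */
	}

The mitigations= handler is registered with early_param(), so it runs during early boot command-line parsing: booting with mitigations=off selects CPU_MITIGATIONS_OFF, while mitigations=auto, or any unrecognized value that falls through both strcmp() checks, leaves the CPU_MITIGATIONS_AUTO default in place. In kernel/ptrace.c, PTRACE_MODE_SCHED gives the scheduler a lockless access check: __ptrace_may_access() returns before security_ptrace_access_check(), and ptrace_has_cap() refuses to audit, so ptrace_may_access_sched() can be called from context-switch paths where the LSM and audit hooks would be unsafe.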
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 328e17dc2f9b..78ec10eee484 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5801,6 +5801,10 @@ static void set_cpu_rq_start_time(void)
 	rq->age_stamp = sched_clock_cpu(cpu);
 }
 
+#ifdef CONFIG_SCHED_SMT
+atomic_t sched_smt_present = ATOMIC_INIT(0);
+#endif
+
 static int sched_cpu_active(struct notifier_block *nfb,
 				      unsigned long action, void *hcpu)
 {
@@ -5817,11 +5821,23 @@ static int sched_cpu_active(struct notifier_block *nfb,
 		 * set_cpu_online(). But it might not yet have marked itself
 		 * as active, which is essential from here on.
 		 */
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going up, increment the number of cores with SMT present.
+		 */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			atomic_inc(&sched_smt_present);
+#endif
 		set_cpu_active(cpu, true);
 		stop_machine_unpark(cpu);
 		return NOTIFY_OK;
 
 	case CPU_DOWN_FAILED:
+#ifdef CONFIG_SCHED_SMT
+		/* Same as for CPU_ONLINE */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			atomic_inc(&sched_smt_present);
+#endif
 		set_cpu_active(cpu, true);
 		return NOTIFY_OK;
 
@@ -5836,7 +5852,15 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		set_cpu_active((long)hcpu, false);
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going down, decrement the number of cores with SMT present.
+		 */
+		if (cpumask_weight(cpu_smt_mask((long)hcpu)) == 2)
+			atomic_dec(&sched_smt_present);
+#endif
 		return NOTIFY_OK;
+
 	default:
 		return NOTIFY_DONE;
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9b0352c34a3b..cf5916ea2ae1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1878,6 +1878,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
 	if (p->last_task_numa_placement) {
 		delta = runtime - p->last_sum_exec_runtime;
 		*period = now - p->last_task_numa_placement;
+
+		/* Avoid time going backwards, prevent potential divide error: */
+		if (unlikely((s64)*period < 0))
+			*period = 0;
 	} else {
 		delta = p->se.avg.load_sum / p->se.load.weight;
 		*period = LOAD_AVG_MAX;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7f18a3243af5..d066a6870245 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/smt.h>
 #include <linux/sched/deadline.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 1adecb4b87c8..7e4d715f9c22 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -417,7 +417,7 @@ static int __init init_tstats_procfs(void)
 {
 	struct proc_dir_entry *pe;
 
-	pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
+	pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
 	if (!pe)
 		return -ENOMEM;
 	return 0;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5e091614fe39..1cf2402c6922 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -701,7 +701,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 
 	preempt_disable_notrace();
 	time = rb_time_stamp(buffer);
-	preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return time;
 }
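On the kernel/sched/core.c hunks: cpumask_weight(cpu_smt_mask(cpu)) == 2 is true exactly when the incoming CPU is the second sibling of its core to come up, and at CPU_DOWN_PREPARE when the outgoing CPU's core still has both siblings online, so sched_smt_present counts cores with SMT currently active rather than individual threads. Code that wants to skip SMT-only work can test the counter; a hedged sketch of the kind of consumer helper this enables (upstream later gained a similar sched_smt_active(), but this exact form is an assumption, not part of this patch):

	#include <linux/atomic.h>

	#ifdef CONFIG_SCHED_SMT
	extern atomic_t sched_smt_present;

	/* assumed helper: true while at least one online core has both SMT siblings */
	static inline bool sched_smt_active(void)
	{
		return atomic_read(&sched_smt_present) > 0;
	}
	#else
	static inline bool sched_smt_active(void) { return false; }
	#endif

Reading the counter with atomic_read() is cheap enough for scheduler fast paths, while the inc/dec sites stay confined to the hotplug notifiers above.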