Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c       | 17
-rw-r--r--  kernel/events/core.c  |  6
-rw-r--r--  kernel/power/Kconfig  |  1
-rw-r--r--  kernel/watchdog.c     |  2
4 files changed, 22 insertions, 4 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 69d25607d41b..a7ec545308a6 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -326,8 +326,7 @@ static struct file_system_type cpuset_fs_type = {
/*
* Return in pmask the portion of a cpuset's cpus_allowed that
* are online. If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus. The top
- * cpuset always has some cpus online.
+ * until we find one that does have some online cpus.
*
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
@@ -336,8 +335,20 @@ static struct file_system_type cpuset_fs_type = {
*/
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
- while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
+ while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
cs = parent_cs(cs);
+ if (unlikely(!cs)) {
+ /*
+ * The top cpuset doesn't have any online cpu as a
+ * consequence of a race between cpuset_hotplug_work
+ * and cpu hotplug notifier. But we know the top
+ * cpuset's effective_cpus is on its way to be
+ * identical to cpu_online_mask.
+ */
+ cpumask_copy(pmask, cpu_online_mask);
+ return;
+ }
+ }
cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}
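The new unlikely(!cs) branch relies on parent_cs() returning NULL once the walk moves past the top cpuset. For context only (these helpers are not part of this diff), they look roughly like the sketch below in this kernel tree:

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	/* The root cgroup_subsys_state has no parent, so css may be NULL. */
	return css ? container_of(css, struct cpuset, css) : NULL;
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	/* NULL when cs is already the top cpuset. */
	return css_cs(cs->css.parent);
}

So the copy of cpu_online_mask only happens after even the top cpuset's effective_cpus was found to have no online CPU, which is exactly the hotplug race described in the comment.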
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 32e2617d654f..d6ec580584b6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -176,9 +176,12 @@ static struct srcu_struct pmus_srcu;
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
+ * 3 - disallow all unpriv perf event use
*/
#ifdef CONFIG_PERF_EVENTS_USERMODE
int sysctl_perf_event_paranoid __read_mostly = -1;
+#elif defined CONFIG_SECURITY_PERF_EVENTS_RESTRICT
+int sysctl_perf_event_paranoid __read_mostly = 3;
#else
int sysctl_perf_event_paranoid __read_mostly = 1;
#endif
@@ -8325,6 +8328,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
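perf_paranoid_any() itself is not defined in this diff; it presumably sits next to the other perf_paranoid_*() helpers in include/linux/perf_event.h. A minimal sketch, assuming it simply tests for the new level 3:

/* Sketch of the assumed helper; not shown in this diff. */
static inline bool perf_paranoid_any(void)
{
	/* Level 3 and above: no perf_event_open() for unprivileged users. */
	return sysctl_perf_event_paranoid > 2;
}

Placing the check at the top of perf_event_open() means unprivileged callers are rejected with -EACCES before the attr structure is even copied in.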
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 84c480946fb2..6d6f63be1f9b 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -1,6 +1,7 @@
config SUSPEND
bool "Suspend to RAM and standby"
depends on ARCH_SUSPEND_POSSIBLE
+ select RTC_LIB
default y
---help---
Allow the system to enter sleep states in which main memory is
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9472691c1eb0..029da92fb712 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -121,7 +121,7 @@ static unsigned long soft_lockup_nmi_warn;
unsigned int __read_mostly hardlockup_panic =
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-static unsigned long hardlockup_allcpu_dumped;
+static unsigned long __maybe_unused hardlockup_allcpu_dumped;
#endif
/*
* We may not want to enable hard lockup detection by default in all cases,
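The __maybe_unused annotation silences -Wunused-variable in configurations where hardlockup_allcpu_dumped is still defined under CONFIG_HARDLOCKUP_DETECTOR_NMI but no remaining code references it. A standalone illustration of the same pattern, with hypothetical names:

/* Hypothetical example of the same pattern; not kernel code. */
static unsigned long __maybe_unused example_dumped_mask;

#ifdef CONFIG_EXAMPLE_FEATURE
void example_hook(void)
{
	/* Only this configuration ever touches the variable. */
	example_dumped_mask = 1;
}
#endif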