Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c          15
-rw-r--r--  kernel/cpuset.c       18
-rw-r--r--  kernel/events/core.c  30
-rw-r--r--  kernel/sched/core.c    8
-rw-r--r--  kernel/sched/fair.c    3
5 files changed, 47 insertions, 27 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 37731292f8a1..1cfd381642da 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -365,21 +365,6 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 		goto out_release;
 	}
 
-	/*
-	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
-	 * and RCU users of this state to go away such that all new such users
-	 * will observe it.
-	 *
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
-	 * Do sync before park smpboot threads to take care the rcu boost case.
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
-
 	smpboot_park_threads(cpu);
 
 	/*
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2ade632197d5..2df78d45a096 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2075,12 +2075,30 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
 	mutex_unlock(&cpuset_mutex);
 }
 
+static int cpuset_allow_attach(struct cgroup_taskset *tset)
+{
+	const struct cred *cred = current_cred(), *tcred;
+	struct task_struct *task;
+	struct cgroup_subsys_state *css;
+
+	cgroup_taskset_for_each(task, css, tset) {
+		tcred = __task_cred(task);
+
+		if ((current != task) && !capable(CAP_SYS_ADMIN) &&
+		    cred->euid.val != tcred->uid.val && cred->euid.val != tcred->suid.val)
+			return -EACCES;
+	}
+
+	return 0;
+}
+
 struct cgroup_subsys cpuset_cgrp_subsys = {
 	.css_alloc	= cpuset_css_alloc,
 	.css_online	= cpuset_css_online,
 	.css_offline	= cpuset_css_offline,
 	.css_free	= cpuset_css_free,
 	.can_attach	= cpuset_can_attach,
+	.allow_attach	= cpuset_allow_attach,
 	.cancel_attach	= cpuset_cancel_attach,
 	.attach		= cpuset_attach,
 	.bind		= cpuset_bind,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 96100cc046c5..32e2617d654f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -158,6 +158,7 @@ enum event_type_t {
 struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
+static DEFINE_PER_CPU(bool, is_idle);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -3388,9 +3389,12 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.group = group,
 			.ret = 0,
 		};
-		smp_call_function_single(event->oncpu,
-					 __perf_event_read, &data, 1);
-		ret = data.ret;
+		if (!event->attr.exclude_idle ||
+		    !per_cpu(is_idle, event->oncpu)) {
+			smp_call_function_single(event->oncpu,
+						 __perf_event_read, &data, 1);
+			ret = data.ret;
+		}
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
@@ -9479,6 +9483,25 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
+static int event_idle_notif(struct notifier_block *nb, unsigned long action,
+			    void *data)
+{
+	switch (action) {
+	case IDLE_START:
+		__this_cpu_write(is_idle, true);
+		break;
+	case IDLE_END:
+		__this_cpu_write(is_idle, false);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block perf_event_idle_nb = {
+	.notifier_call = event_idle_notif,
+};
+
void __init perf_event_init(void)
{
int ret;
@@ -9492,6 +9515,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_task_clock, NULL, -1);
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
+	idle_notifier_register(&perf_event_idle_nb);
 	register_reboot_notifier(&perf_reboot_notifier);
 
 	ret = init_hw_breakpoint();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 58303b3dc356..db0472b37feb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -85,9 +85,6 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
-#ifdef CONFIG_MSM_APP_SETTINGS
-#include <asm/app_api.h>
-#endif
 
 #include "sched.h"
 #include "../workqueue_internal.h"
@@ -5895,11 +5892,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
-
-#ifdef CONFIG_MSM_APP_SETTINGS
-	if (use_app_setting)
-		switch_app_setting_bit(prev, next);
-#endif
 }
 
 /**
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 958d79e1933c..584cd048c24b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4096,7 +4096,8 @@ static inline int migration_needed(struct task_struct *p, int cpu)
 	int nice;
 	struct related_thread_group *grp;
 
-	if (!sched_enable_hmp || p->state != TASK_RUNNING)
+	if (!sched_enable_hmp || p->state != TASK_RUNNING ||
+	    p->nr_cpus_allowed == 1)
 		return 0;
 
 	/* No need to migrate task that is about to be throttled */