Diffstat (limited to 'kernel')
-rw-r--r--   kernel/fork.c                  8
-rw-r--r--   kernel/irq/chip.c              2
-rw-r--r--   kernel/kprobes.c               2
-rw-r--r--   kernel/pid_namespace.c         2
-rw-r--r--   kernel/sched/fair.c           29
-rw-r--r--   kernel/sched/sched.h           2
-rw-r--r--   kernel/trace/trace_kprobe.c    5
7 files changed, 39 insertions, 11 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 52272755f69a..cf0013cdf3c7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1591,11 +1591,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 */
 	recalc_sigpending();
 	if (signal_pending(current)) {
-		spin_unlock(&current->sighand->siglock);
-		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
 		goto bad_fork_cancel_cgroup;
 	}
+	if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
+		retval = -ENOMEM;
+		goto bad_fork_cancel_cgroup;
+	}
 
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -1646,6 +1648,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	return p;
 
 bad_fork_cancel_cgroup:
+	spin_unlock(&current->sighand->siglock);
+	write_unlock_irq(&tasklist_lock);
 	cgroup_cancel_fork(p, cgrp_ss_priv);
 bad_fork_free_pid:
 	threadgroup_change_end(current);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 15206453b12a..e4453d9f788c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -810,8 +810,8 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
 	if (!desc)
 		return;
 
-	__irq_do_set_handler(desc, handle, 1, NULL);
 	desc->irq_common_data.handler_data = data;
+	__irq_do_set_handler(desc, handle, 1, NULL);
 
 	irq_put_desc_busunlock(desc, flags);
 }
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d10ab6b9b5e0..695763516908 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -563,7 +563,7 @@ static void kprobe_optimizer(struct work_struct *work)
 }
 
 /* Wait for completing optimization and unoptimization */
-static void wait_for_kprobe_optimizer(void)
+void wait_for_kprobe_optimizer(void)
 {
 	mutex_lock(&kprobe_mutex);
 
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index a65ba137fd15..567ecc826bc8 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -255,7 +255,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 	 * if reparented.
 	 */
 	for (;;) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
+		set_current_state(TASK_INTERRUPTIBLE);
 		if (pid_ns->nr_hashed == init_pids)
 			break;
 		schedule();
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 83cfb72b2d95..6f353de3f390 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3961,6 +3961,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
+	/* Synchronize hierarchical throttle counter: */
+	if (unlikely(!cfs_rq->throttle_uptodate)) {
+		struct rq *rq = rq_of(cfs_rq);
+		struct cfs_rq *pcfs_rq;
+		struct task_group *tg;
+
+		cfs_rq->throttle_uptodate = 1;
+
+		/* Get closest up-to-date node, because leaves go first: */
+		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+			if (pcfs_rq->throttle_uptodate)
+				break;
+		}
+		if (tg) {
+			cfs_rq->throttle_count = pcfs_rq->throttle_count;
+			cfs_rq->throttled_clock_task = rq_clock_task(rq);
+		}
+	}
+
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
@@ -4346,15 +4366,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
+			/* Avoid re-evaluating load for this entity: */
+			se = parent_entity(se);
 			/*
 			 * Bias pick_next to pick a task from this cfs_rq, as
 			 * p is sleeping when it is within its sched_slice.
 			 */
-			if (task_sleep && parent_entity(se))
-				set_next_buddy(parent_entity(se));
-
-			/* avoid re-evaluating load for this entity */
-			se = parent_entity(se);
+			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+				set_next_buddy(se);
 			break;
 		}
 		flags |= DEQUEUE_SLEEP;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 780522c65cea..0386a2cdb008 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -421,7 +421,7 @@ struct cfs_rq {
 
 	u64 throttled_clock, throttled_clock_task;
 	u64 throttled_clock_task_time;
-	int throttled, throttle_count;
+	int throttled, throttle_count, throttle_uptodate;
 	struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index c9956440d0e6..12ea4ea619ee 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1471,6 +1471,11 @@ static __init int kprobe_trace_self_tests_init(void)
 end:
 	release_all_trace_kprobes();
 
+	/*
+	 * Wait for the optimizer work to finish. Otherwise it might fiddle
+	 * with probes in already freed __init text.
+	 */
+	wait_for_kprobe_optimizer();
 	if (warn)
 		pr_cont("NG: Some tests are failed. Please check them.\n");
 	else
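
A note on the kernel/irq/chip.c hunk above: __irq_do_set_handler(desc, handle, 1, NULL) not only installs the chained handler but also starts up and unmasks the interrupt, so the handler can fire (and fetch handler_data, typically without taking desc->lock) before the old code ever assigned the data. Swapping the two stores closes that window. The user-space C sketch below illustrates the same publish-before-initialize pattern; all names in it are hypothetical, not code from the patch:

	#include <stdatomic.h>
	#include <stdio.h>

	struct desc {
		void *handler_data;
		void (*_Atomic handler)(struct desc *);	/* NULL until installed */
	};

	static void chained_handler(struct desc *d)
	{
		/* With the buggy order, handler_data may still be NULL here. */
		printf("handler_data = %p\n", d->handler_data);
	}

	/* Buggy order: the handler is callable before its data exists. */
	static void install_buggy(struct desc *d, void *data)
	{
		atomic_store(&d->handler, chained_handler);
		d->handler_data = data;		/* too late for an early call */
	}

	/* Fixed order, mirroring the patch: data first, then the handler. */
	static void install_fixed(struct desc *d, void *data)
	{
		d->handler_data = data;
		atomic_store(&d->handler, chained_handler);	/* publishes data too */
	}

	int main(void)
	{
		static int cookie;
		struct desc d = { 0 };

		(void)install_buggy;		/* kept only for comparison */
		install_fixed(&d, &cookie);

		void (*h)(struct desc *) = atomic_load(&d.handler);
		if (h)
			h(&d);			/* simulated interrupt sees valid data */
		return 0;
	}

The atomics here only model "an interrupt can observe the handler the moment it is stored"; in the kernel the equivalent moment is irq_startup() making the line deliverable inside __irq_do_set_handler().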
