Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c | 46
1 file changed, 34 insertions(+), 12 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 92ff63200287..d8a12cc06aee 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -390,6 +390,7 @@ static void exit_mm(struct task_struct *tsk)
{
struct mm_struct *mm = tsk->mm;
struct core_state *core_state;
+ int mm_released;
mm_release(tsk, mm);
if (!mm)
@@ -436,9 +437,12 @@ static void exit_mm(struct task_struct *tsk)
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mm_update_next_owner(mm);
- mmput(mm);
+
+ mm_released = mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
exit_oom_victim();
+ if (mm_released)
+ set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
}
static struct task_struct *find_alive_thread(struct task_struct *p)
@@ -634,6 +638,7 @@ static void check_stack_usage(void)
static DEFINE_SPINLOCK(low_water_lock);
static int lowest_to_date = THREAD_SIZE;
unsigned long free;
+ int islower = false;
free = stack_not_used(current);
@@ -642,11 +647,16 @@ static void check_stack_usage(void)
spin_lock(&low_water_lock);
if (free < lowest_to_date) {
- pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n",
- current->comm, task_pid_nr(current), free);
lowest_to_date = free;
+ islower = true;
}
spin_unlock(&low_water_lock);
+
+ if (islower) {
+ pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n",
+ current->comm, task_pid_nr(current), free);
+ }
}
#else
static inline void check_stack_usage(void) {}
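The check_stack_usage() hunk above applies a common pattern: update the low-water mark while holding the spinlock, then defer the slow console output until after the lock is dropped so nothing heavy runs under low_water_lock. A minimal userspace sketch of the same pattern, using a pthread mutex in place of the kernel spinlock (report_stack_usage() and the numbers are illustrative, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t low_water_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long lowest_to_date = 16384;    /* stand-in for THREAD_SIZE */

static void report_stack_usage(const char *comm, int pid, unsigned long free)
{
        bool islower = false;

        /* Only the bookkeeping happens under the lock. */
        pthread_mutex_lock(&low_water_lock);
        if (free < lowest_to_date) {
                lowest_to_date = free;
                islower = true;
        }
        pthread_mutex_unlock(&low_water_lock);

        /* The slow part (console I/O) runs after the lock is released. */
        if (islower)
                printf("%s (%d) used greatest stack depth: %lu bytes left\n",
                       comm, pid, free);
}

int main(void)
{
        report_stack_usage("demo", 1, 1024);   /* new record: prints */
        report_stack_usage("demo", 2, 4096);   /* not lower: silent */
        return 0;
}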
@@ -702,6 +712,7 @@ void do_exit(long code)
exit_signals(tsk); /* sets PF_EXITING */
+ sched_exit(tsk);
schedtune_exit_task(tsk);
/*
@@ -923,17 +934,28 @@ static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
task_pid_type(p, wo->wo_type) == wo->wo_pid;
}
-static int eligible_child(struct wait_opts *wo, struct task_struct *p)
+static int
+eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
if (!eligible_pid(wo, p))
return 0;
- /* Wait for all children (clone and not) if __WALL is set;
- * otherwise, wait for clone children *only* if __WCLONE is
- * set; otherwise, wait for non-clone children *only*. (Note:
- * A "clone" child here is one that reports to its parent
- * using a signal other than SIGCHLD.) */
- if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
- && !(wo->wo_flags & __WALL))
+
+ /*
+ * Wait for all children (clone and not) if __WALL is set or
+ * if it is traced by us.
+ */
+ if (ptrace || (wo->wo_flags & __WALL))
+ return 1;
+
+ /*
+ * Otherwise, wait for clone children *only* if __WCLONE is set;
+ * otherwise, wait for non-clone children *only*.
+ *
+ * Note: a "clone" child here is one that reports to its parent
+ * using a signal other than SIGCHLD, or a non-leader thread which
+ * we can only see if it is traced by us.
+ */
+ if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
return 0;
return 1;
@@ -1306,7 +1328,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
if (unlikely(exit_state == EXIT_DEAD))
return 0;
- ret = eligible_child(wo, p);
+ ret = eligible_child(wo, ptrace, p);
if (!ret)
return ret;
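For reference, the comment rewritten in eligible_child() above describes the userspace-visible wait semantics: __WALL matches every child, __WCLONE matches only children whose exit signal is not SIGCHLD, and with this change a child traced by the waiter is treated as if __WALL were set. A minimal userspace sketch of the __WCLONE case, assuming Linux with glibc's clone() wrapper (the SIGUSR1 exit signal and the stack size are illustrative choices):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef __WALL
#define __WALL   0x40000000     /* wait for all children */
#endif
#ifndef __WCLONE
#define __WCLONE 0x80000000     /* wait for "clone" children only */
#endif

static int child_fn(void *arg)
{
        (void)arg;
        _exit(42);
}

int main(void)
{
        char *stack = malloc(64 * 1024);
        if (!stack)
                return 1;

        /* Ignore the child's exit signal so it does not terminate us. */
        signal(SIGUSR1, SIG_IGN);

        /*
         * Exit signal SIGUSR1 instead of SIGCHLD makes this a "clone"
         * child in eligible_child()'s terms.  The stack argument is the
         * top of the buffer (stacks grow down on most architectures).
         */
        pid_t pid = clone(child_fn, stack + 64 * 1024, SIGUSR1, NULL);
        if (pid < 0)
                return 1;

        /*
         * waitpid(pid, &status, 0) would not see this child; __WCLONE
         * (or __WALL) is required because its exit signal is not SIGCHLD.
         */
        int status;
        if (waitpid(pid, &status, __WCLONE) == pid && WIFEXITED(status))
                printf("clone child exited with status %d\n",
                       WEXITSTATUS(status));

        free(stack);
        return 0;
}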