author    Michael Bestas <mkbestas@lineageos.org>  2021-05-31 03:09:03 +0300
committer Michael Bestas <mkbestas@lineageos.org>  2021-05-31 03:09:03 +0300
commit    0ce166e3c4e576ae072d950006a937931df8ca3c (patch)
tree      87428d6651cc3cd42c586331c67590f3a9e2c578 /kernel
parent    a4940e8fb458a45644126d132c2d5b74719df8df (diff)
parent    3628cdd31199df6519ecad709f5cc8be9401f93e (diff)
Merge branch 'android-4.4-p' of https://android.googlesource.com/kernel/common into lineage-18.1-caf-msm8998
This brings LA.UM.9.2.r1-03300-SDMxx0.0 up to date with
https://android.googlesource.com/kernel/common/ android-4.4-p at commit:
3628cdd31199d Merge 4.4.270 into android-4.4-p

Conflicts:
	drivers/mmc/core/core.c
	drivers/usb/core/hub.c
	kernel/trace/trace.c

Change-Id: I6b81471122341f9769ce9c65cbd0fedd5e908b38
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kexec_file.c          4
-rw-r--r--  kernel/ptrace.c             18
-rw-r--r--  kernel/trace/ftrace.c        5
-rw-r--r--  kernel/trace/trace.c        45
-rw-r--r--  kernel/trace/trace_clock.c  44
5 files changed, 73 insertions(+), 43 deletions(-)
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 6030efd4a188..1210cd6bcaa6 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -575,8 +575,10 @@ static int kexec_calculate_store_digests(struct kimage *image)
sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
sha_regions = vzalloc(sha_region_sz);
- if (!sha_regions)
+ if (!sha_regions) {
+ ret = -ENOMEM;
goto out_free_desc;
+ }
desc->tfm = tfm;
desc->flags = 0;
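The kexec_file.c hunk fixes a silent failure: when vzalloc() failed, the function jumped to the cleanup label without setting an error code, so the caller saw success. A minimal userspace sketch of the same goto-cleanup pattern (names and allocation sizes are illustrative, not from the patch):

#include <errno.h>
#include <stdlib.h>

static int setup_buffers(void)
{
	int ret = 0;
	void *desc, *regions;

	desc = malloc(64);
	if (!desc)
		return -ENOMEM;

	regions = calloc(16, 32);	/* stands in for vzalloc() */
	if (!regions) {
		ret = -ENOMEM;		/* the assignment the patch adds */
		goto out_free_desc;
	}

	/* ... use desc and regions ... */
	free(regions);
out_free_desc:
	free(desc);
	return ret;
}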
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index da8c358930fb..5a1d8cc7ef4e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -129,6 +129,21 @@ void __ptrace_unlink(struct task_struct *child)
spin_unlock(&child->sighand->siglock);
}
+static bool looks_like_a_spurious_pid(struct task_struct *task)
+{
+ if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
+ return false;
+
+ if (task_pid_vnr(task) == task->ptrace_message)
+ return false;
+ /*
+ * The tracee changed its pid but the PTRACE_EVENT_EXEC event
+ * was not wait()'ed, most probably debugger targets the old
+ * leader which was destroyed in de_thread().
+ */
+ return true;
+}
+
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
@@ -139,7 +154,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
return ret;
spin_lock_irq(&task->sighand->siglock);
- if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+ if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
+ !__fatal_signal_pending(task)) {
task->state = __TASK_TRACED;
ret = true;
}
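The ptrace.c hunk handles a race where a non-leader thread of a multi-threaded tracee calls execve(): the execing thread takes over the leader's pid in de_thread(), so a debugger still waiting on the old pid could freeze the wrong task. Below is a hedged userspace sketch of the tracer side of this protocol; per ptrace(2), PTRACE_GETEVENTMSG at a PTRACE_EVENT_EXEC stop returns the former thread id, which is how a debugger reconciles the pid change (wait_for_exec and the surrounding setup are illustrative):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int wait_for_exec(pid_t child)
{
	int status;
	unsigned long former_tid;

	/* Ask for a stop at every execve() in the tracee. */
	ptrace(PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACEEXEC);
	ptrace(PTRACE_CONT, child, 0, 0);

	waitpid(child, &status, 0);
	if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8))) {
		/* The event message is the thread id before the exec. */
		ptrace(PTRACE_GETEVENTMSG, child, 0, &former_tid);
		printf("exec by former tid %lu, now reported as %d\n",
		       former_tid, child);
	}
	return 0;
}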
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b56b1daa0a59..93c2abe27871 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4401,8 +4401,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
parser = &iter->parser;
if (trace_parser_loaded(parser)) {
+ int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
+
parser->buffer[parser->idx] = 0;
- ftrace_match_records(iter->hash, parser->buffer, parser->idx);
+ ftrace_process_regex(iter->hash, parser->buffer,
+ parser->idx, enable);
}
trace_parser_put(parser);
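The ftrace.c hunk makes the file-release path run any leftover parser contents through ftrace_process_regex(), so filter commands such as "func:traceoff" take effect even when the writer never sent a trailing newline. A hedged userspace sketch of triggering that path (the tracefs mount point is an assumption; older kernels expose it under /sys/kernel/debug/tracing):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int set_traceoff_on(const char *func)
{
	char cmd[128];
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

	if (fd < 0)
		return -1;
	/* A command, not a plain pattern: stop tracing when func runs. */
	snprintf(cmd, sizeof(cmd), "%s:traceoff", func);
	write(fd, cmd, strlen(cmd));	/* no '\n': still in the parser */
	close(fd);			/* release path must process it */
	return 0;
}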
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 62e1befbad7f..4fa248746dd8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1580,10 +1580,13 @@ void trace_stop_cmdline_recording(void);
static int trace_save_cmdline(struct task_struct *tsk)
{
- unsigned pid, idx;
+ unsigned tpid, idx;
- if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
- return 0;
+ /* treat recording of idle task as a success */
+ if (!tsk->pid)
+ return 1;
+
+ tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
preempt_disable();
/*
@@ -1597,26 +1600,15 @@ static int trace_save_cmdline(struct task_struct *tsk)
return 0;
}
- idx = savedcmd->map_pid_to_cmdline[tsk->pid];
+ idx = savedcmd->map_pid_to_cmdline[tpid];
if (idx == NO_CMDLINE_MAP) {
idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
- /*
- * Check whether the cmdline buffer at idx has a pid
- * mapped. We are going to overwrite that entry so we
- * need to clear the map_pid_to_cmdline. Otherwise we
- * would read the new comm for the old pid.
- */
- pid = savedcmd->map_cmdline_to_pid[idx];
- if (pid != NO_CMDLINE_MAP)
- savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
-
- savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
- savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
-
+ savedcmd->map_pid_to_cmdline[tpid] = idx;
savedcmd->cmdline_idx = idx;
}
+ savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
set_cmdline(idx, tsk->comm);
savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid;
arch_spin_unlock(&trace_cmdline_lock);
@@ -1628,6 +1620,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
static void __trace_find_cmdline(int pid, char comm[])
{
unsigned map;
+ int tpid;
if (!pid) {
strcpy(comm, "<idle>");
@@ -1639,16 +1632,16 @@ static void __trace_find_cmdline(int pid, char comm[])
return;
}
- if (pid > PID_MAX_DEFAULT) {
- strcpy(comm, "<...>");
- return;
+ tpid = pid & (PID_MAX_DEFAULT - 1);
+ map = savedcmd->map_pid_to_cmdline[tpid];
+ if (map != NO_CMDLINE_MAP) {
+ tpid = savedcmd->map_cmdline_to_pid[map];
+ if (tpid == pid) {
+ strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+ return;
+ }
}
-
- map = savedcmd->map_pid_to_cmdline[pid];
- if (map != NO_CMDLINE_MAP)
- strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN - 1);
- else
- strcpy(comm, "<...>");
+ strcpy(comm, "<...>");
}
void trace_find_cmdline(int pid, char comm[])
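The two trace.c hunks replace the old "pid > PID_MAX_DEFAULT" bail-out with a masked index, tpid = pid & (PID_MAX_DEFAULT - 1), and store the full pid in the reverse map so a lookup can detect collisions and fall back to "<...>" instead of reporting another task's comm. A self-contained userspace model of that scheme (array sizes and names are illustrative):

#include <string.h>

#define PID_MAX_DEFAULT 0x8000		/* 32768, matching the kernel default */
#define SAVED_CMDLINES  128		/* illustrative; the kernel size differs */
#define NO_CMDLINE_MAP  ((unsigned)-1)

static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT];
static int      map_cmdline_to_pid[SAVED_CMDLINES];
static char     saved_cmdlines[SAVED_CMDLINES][16];
static unsigned cmdline_idx;

void init_maps(void)
{
	for (unsigned i = 0; i < PID_MAX_DEFAULT; i++)
		map_pid_to_cmdline[i] = NO_CMDLINE_MAP;
}

/* Index by the masked pid, but record the full pid in the reverse map. */
void save_cmdline(int pid, const char *comm)
{
	unsigned tpid = pid & (PID_MAX_DEFAULT - 1);
	unsigned idx  = map_pid_to_cmdline[tpid];

	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
		map_pid_to_cmdline[tpid] = idx;
		cmdline_idx = idx;
	}
	map_cmdline_to_pid[idx] = pid;	/* full pid, not the masked one */
	strncpy(saved_cmdlines[idx], comm, sizeof(saved_cmdlines[idx]) - 1);
	saved_cmdlines[idx][sizeof(saved_cmdlines[idx]) - 1] = '\0';
}

const char *find_cmdline(int pid)
{
	unsigned map = map_pid_to_cmdline[pid & (PID_MAX_DEFAULT - 1)];

	/* Trust the entry only if the full pid round-trips. */
	if (map != NO_CMDLINE_MAP && map_cmdline_to_pid[map] == pid)
		return saved_cmdlines[map];
	return "<...>";
}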
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 0f06532a755b..b67ea5eed2a8 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -93,33 +93,49 @@ u64 notrace trace_clock_global(void)
{
unsigned long flags;
int this_cpu;
- u64 now;
+ u64 now, prev_time;
local_irq_save(flags);
this_cpu = raw_smp_processor_id();
- now = sched_clock_cpu(this_cpu);
+
/*
- * If in an NMI context then dont risk lockups and return the
- * cpu_clock() time:
+ * The global clock "guarantees" that the events are ordered
+ * between CPUs. But if two events on two different CPUS call
+ * trace_clock_global at roughly the same time, it really does
+ * not matter which one gets the earlier time. Just make sure
+ * that the same CPU will always show a monotonic clock.
+ *
+ * Use a read memory barrier to get the latest written
+ * time that was recorded.
*/
- if (unlikely(in_nmi()))
- goto out;
+ smp_rmb();
+ prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ now = sched_clock_cpu(this_cpu);
- arch_spin_lock(&trace_clock_struct.lock);
+ /* Make sure that now is always greater than prev_time */
+ if ((s64)(now - prev_time) < 0)
+ now = prev_time + 1;
/*
- * TODO: if this happens often then maybe we should reset
- * my_scd->clock to prev_time+1, to make sure
- * we start ticking with the local clock from now on?
+ * If in an NMI context then dont risk lockups and simply return
+ * the current time.
*/
- if ((s64)(now - trace_clock_struct.prev_time) < 0)
- now = trace_clock_struct.prev_time + 1;
+ if (unlikely(in_nmi()))
+ goto out;
- trace_clock_struct.prev_time = now;
+ /* Tracing can cause strange recursion, always use a try lock */
+ if (arch_spin_trylock(&trace_clock_struct.lock)) {
+ /* Reread prev_time in case it was already updated */
+ prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ if ((s64)(now - prev_time) < 0)
+ now = prev_time + 1;
- arch_spin_unlock(&trace_clock_struct.lock);
+ trace_clock_struct.prev_time = now;
+ /* The unlock acts as the wmb for the above rmb */
+ arch_spin_unlock(&trace_clock_struct.lock);
+ }
out:
local_irq_restore(flags);
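The trace_clock.c hunk reworks trace_clock_global() to stay monotonic per CPU without ever spinning: prev_time is read behind a barrier, now is clamped to prev_time + 1, and only a trylock winner republishes prev_time, so NMI and recursive tracing contexts cannot deadlock. A hedged userspace model of the same ordering using C11 atomics (local_clock() stands in for sched_clock_cpu() and is assumed to exist):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t prev_time;
static atomic_flag clock_lock = ATOMIC_FLAG_INIT;

extern uint64_t local_clock(void);	/* assumed per-CPU clock source */

uint64_t global_clock(void)
{
	/* Acquire pairs with the releasing unlock below, mirroring the
	 * patch's smp_rmb()/"unlock acts as the wmb" comment. */
	uint64_t prev = atomic_load_explicit(&prev_time,
					     memory_order_acquire);
	uint64_t now = local_clock();

	/* Never let a caller observe time going backwards. */
	if ((int64_t)(now - prev) < 0)
		now = prev + 1;

	/* Tracing can recurse: update the shared time only if the lock
	 * is free; otherwise just return the clamped value. */
	if (!atomic_flag_test_and_set_explicit(&clock_lock,
					       memory_order_acquire)) {
		/* Reread in case another CPU advanced it meanwhile. */
		prev = atomic_load_explicit(&prev_time,
					    memory_order_relaxed);
		if ((int64_t)(now - prev) < 0)
			now = prev + 1;
		atomic_store_explicit(&prev_time, now,
				      memory_order_relaxed);
		atomic_flag_clear_explicit(&clock_lock,
					   memory_order_release);
	}
	return now;
}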