Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                 |  52
-rw-r--r--  kernel/trace/Makefile                |   6
-rw-r--r--  kernel/trace/blktrace.c              |  80
-rw-r--r--  kernel/trace/ipc_logging.c           | 876
-rw-r--r--  kernel/trace/ipc_logging_debug.c     | 184
-rw-r--r--  kernel/trace/ipc_logging_private.h   | 165
-rw-r--r--  kernel/trace/msm_rtb.c               | 346
-rw-r--r--  kernel/trace/power-traces.c          |   1
-rw-r--r--  kernel/trace/trace.c                 | 114
-rw-r--r--  kernel/trace/trace_cpu_freq_switch.c | 312
-rw-r--r--  kernel/trace/trace_event_perf.c      |   3
-rw-r--r--  kernel/trace/trace_events.c          |   5
-rw-r--r--  kernel/trace/trace_irqsoff.c         |  42
-rw-r--r--  kernel/trace/trace_printk.c          |   2
-rw-r--r--  kernel/trace/trace_sched_wakeup.c    |   3
-rw-r--r--  kernel/trace/trace_syscalls.c        |   4
-rw-r--r--  kernel/trace/trace_uprobe.c          |   2
17 files changed, 2152 insertions, 45 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 006eefb6ede0..d8b00f94c63d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -89,6 +89,31 @@ config RING_BUFFER_ALLOW_SWAP
Allow the use of ring_buffer_swap_cpu.
Adds a very slight overhead to tracing when enabled.
+config IPC_LOGGING
+ bool "Debug Logging for IPC Drivers"
+ select GENERIC_TRACER
+ help
+	  This option enables debug logging for IPC drivers.
+
+ If in doubt, say no.
+
+config QCOM_RTB
+ bool "Register tracing"
+ help
+ Add support for logging different events to a small uncached
+ region. This is designed to aid in debugging reset cases where the
+ caches may not be flushed before the target resets.
+
+config QCOM_RTB_SEPARATE_CPUS
+ bool "Separate entries for each cpu"
+ depends on QCOM_RTB
+ depends on SMP
+ help
+ Under some circumstances, it may be beneficial to give dedicated space
+ for each cpu to log accesses. Selecting this option will log each cpu
+	  separately. This will guarantee that the last accesses for each cpu
+	  will be logged, but there will be fewer entries per cpu.
+
# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
@@ -111,6 +136,20 @@ config GENERIC_TRACER
bool
select TRACING
+if TRACING
+
+config DISABLE_TRACE_PRINTK
+ bool "Force disable trace_printk() usage"
+ default y
+ help
+	  When trace_printk() is used anywhere in the kernel source, it
+	  enables debugging functions that are not desired in a production
+	  kernel. Enabling this option replaces trace_printk() with pr_debug().
+
+ If in doubt, say Y.
+
+endif
+
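For reference, a minimal sketch of how such a remap could look in C. This is an illustration only; the real remap would live in a shared header outside this kernel/trace diffstat, and the exact macro form is an assumption:

    /* Hypothetical: when CONFIG_DISABLE_TRACE_PRINTK is set, route
     * trace_printk() callers to pr_debug() so that no ftrace debug
     * machinery is enabled in production builds.
     */
    #ifdef CONFIG_DISABLE_TRACE_PRINTK
    #define trace_printk(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
    #endif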
#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
@@ -499,6 +538,19 @@ config FUNCTION_PROFILER
If in doubt, say N.
+config CPU_FREQ_SWITCH_PROFILER
+ bool "CPU frequency switch time profiler"
+ select GENERIC_TRACER
+ help
+ This option enables the CPU frequency switch profiler. A file is
+ created in debugfs called "cpu_freq_switch_profile_enabled", which
+ defaults to zero. When a 1 is echoed into this file, profiling begins.
+	  When a 0 is echoed, profiling stops. A "cpu_freq_switch" file is
+	  also created in the trace_stat directory; this file shows the
+ switches that have occurred and duration statistics.
+
+ If in doubt, say N.
+
config FTRACE_MCOUNT_RECORD
def_bool y
depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 4b35fb97ae44..a0177ae43058 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_CPU_FREQ_SWITCH_PROFILER) += trace_cpu_freq_switch.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
@@ -70,7 +71,12 @@ endif
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
obj-$(CONFIG_GPU_TRACEPOINTS) += gpu-traces.o
+obj-$(CONFIG_QCOM_RTB) += msm_rtb.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
+obj-$(CONFIG_IPC_LOGGING) += ipc_logging.o
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_IPC_LOGGING) += ipc_logging_debug.o
+endif
libftrace-y := ftrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 8ac3663e0012..267def89a2db 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -203,9 +203,9 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
* blk_io_trace structure and places it in a per-cpu subbuffer.
*/
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
- int rw, u32 what, int error, int pdu_len, void *pdu_data)
+ int rw, u32 what, int error, int pdu_len,
+ void *pdu_data, struct task_struct *tsk)
{
- struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
struct ring_buffer *buffer = NULL;
struct blk_io_trace *t;
@@ -783,6 +783,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
unsigned int nr_bytes, u32 what)
{
struct blk_trace *bt;
+ struct task_struct *tsk = current;
rcu_read_lock();
bt = rcu_dereference(q->blk_trace);
@@ -791,14 +792,28 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
return;
}
+ /*
+ * Use the bio context for all events except ISSUE and
+ * COMPLETE events.
+ *
+ * Not all the pages in the bio are dirtied by the same task but
+ * most likely it will be, since the sectors accessed on the device
+ * must be adjacent.
+ */
+ if (!((what == BLK_TA_ISSUE) || (what == BLK_TA_COMPLETE)) &&
+ bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+ rq->bio->bi_io_vec->bv_page &&
+ rq->bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
- what, rq->errors, rq->cmd_len, rq->cmd);
+ what, rq->errors, rq->cmd_len, rq->cmd, tsk);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
- rq->cmd_flags, what, rq->errors, 0, NULL);
+ rq->cmd_flags, what, rq->errors, 0, NULL, tsk);
}
rcu_read_unlock();
}
@@ -851,6 +866,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
u32 what, int error)
{
struct blk_trace *bt;
+ struct task_struct *tsk = current;
rcu_read_lock();
bt = rcu_dereference(q->blk_trace);
@@ -859,8 +875,17 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
return;
}
+ /*
+ * Not all the pages in the bio are dirtied by the same task but
+ * most likely it will be, since the sectors accessed on the device
+ * must be adjacent.
+ */
+ if (bio_has_data(bio) && bio->bi_io_vec && bio->bi_io_vec->bv_page &&
+ bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
- bio->bi_rw, what, error, 0, NULL);
+ bio->bi_rw, what, error, 0, NULL, tsk);
rcu_read_unlock();
}
@@ -911,7 +936,8 @@ static void blk_add_trace_getrq(void *ignore,
rcu_read_lock();
bt = rcu_dereference(q->blk_trace);
if (bt)
- __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+ __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0,
+ NULL, current);
rcu_read_unlock();
}
}
@@ -930,7 +956,7 @@ static void blk_add_trace_sleeprq(void *ignore,
bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
- 0, 0, NULL);
+ 0, 0, NULL, current);
rcu_read_unlock();
}
}
@@ -942,7 +968,8 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
rcu_read_lock();
bt = rcu_dereference(q->blk_trace);
if (bt)
- __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+ __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL,
+ current);
rcu_read_unlock();
}
@@ -962,7 +989,8 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
else
what = BLK_TA_UNPLUG_TIMER;
- __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+ __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu,
+ current);
}
rcu_read_unlock();
}
@@ -972,15 +1000,21 @@ static void blk_add_trace_split(void *ignore,
unsigned int pdu)
{
struct blk_trace *bt;
+ struct task_struct *tsk = current;
rcu_read_lock();
bt = rcu_dereference(q->blk_trace);
if (bt) {
__be64 rpdu = cpu_to_be64(pdu);
+ if (bio_has_data(bio) && bio->bi_io_vec &&
+ bio->bi_io_vec->bv_page &&
+ bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
__blk_add_trace(bt, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
- bio->bi_error, sizeof(rpdu), &rpdu);
+ bio->bi_error, sizeof(rpdu), &rpdu, tsk);
}
rcu_read_unlock();
}
@@ -1004,6 +1038,7 @@ static void blk_add_trace_bio_remap(void *ignore,
{
struct blk_trace *bt;
struct blk_io_trace_remap r;
+ struct task_struct *tsk = current;
rcu_read_lock();
bt = rcu_dereference(q->blk_trace);
@@ -1016,9 +1051,14 @@ static void blk_add_trace_bio_remap(void *ignore,
r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev);
r.sector_from = cpu_to_be64(from);
+ if (bio_has_data(bio) && bio->bi_io_vec &&
+ bio->bi_io_vec->bv_page &&
+ bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
- sizeof(r), &r);
+ sizeof(r), &r, tsk);
rcu_read_unlock();
}
@@ -1042,6 +1082,7 @@ static void blk_add_trace_rq_remap(void *ignore,
{
struct blk_trace *bt;
struct blk_io_trace_remap r;
+ struct task_struct *tsk = current;
rcu_read_lock();
bt = rcu_dereference(q->blk_trace);
@@ -1054,9 +1095,14 @@ static void blk_add_trace_rq_remap(void *ignore,
r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
r.sector_from = cpu_to_be64(from);
+ if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+ rq->bio->bi_io_vec->bv_page &&
+ rq->bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
- sizeof(r), &r);
+ sizeof(r), &r, tsk);
rcu_read_unlock();
}
@@ -1076,6 +1122,7 @@ void blk_add_driver_data(struct request_queue *q,
void *data, size_t len)
{
struct blk_trace *bt;
+ struct task_struct *tsk = current;
rcu_read_lock();
bt = rcu_dereference(q->blk_trace);
@@ -1084,12 +1131,17 @@ void blk_add_driver_data(struct request_queue *q,
return;
}
+ if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+ rq->bio->bi_io_vec->bv_page &&
+ rq->bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
- BLK_TA_DRV_DATA, rq->errors, len, data);
+ BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
else
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
- BLK_TA_DRV_DATA, rq->errors, len, data);
+ BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
new file mode 100644
index 000000000000..ed29c38cd7fb
--- /dev/null
+++ b/kernel/trace/ipc_logging.c
@@ -0,0 +1,876 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/arch_timer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/ipc_logging.h>
+
+#include "ipc_logging_private.h"
+
+#define LOG_PAGE_DATA_SIZE sizeof(((struct ipc_log_page *)0)->data)
+#define LOG_PAGE_FLAG (1 << 31)
+
+static LIST_HEAD(ipc_log_context_list);
+static DEFINE_RWLOCK(context_list_lock_lha1);
+static void *get_deserialization_func(struct ipc_log_context *ilctxt,
+ int type);
+
+static struct ipc_log_page *get_first_page(struct ipc_log_context *ilctxt)
+{
+ struct ipc_log_page_header *p_pghdr;
+ struct ipc_log_page *pg = NULL;
+
+ if (!ilctxt)
+ return NULL;
+ p_pghdr = list_first_entry(&ilctxt->page_list,
+ struct ipc_log_page_header, list);
+ pg = container_of(p_pghdr, struct ipc_log_page, hdr);
+ return pg;
+}
+
+/**
+ * is_nd_read_empty - Returns true if no data is available to read in log
+ *
+ * @ilctxt: logging context
+ * @returns: 1 if context is empty; 0 if not empty; <0 for failure
+ *
+ * This is for the debugfs read pointer which allows for a non-destructive read.
+ * There may still be data in the log, but it may have already been read.
+ */
+static int is_nd_read_empty(struct ipc_log_context *ilctxt)
+{
+ if (!ilctxt)
+ return -EINVAL;
+
+ return ((ilctxt->nd_read_page == ilctxt->write_page) &&
+ (ilctxt->nd_read_page->hdr.nd_read_offset ==
+ ilctxt->write_page->hdr.write_offset));
+}
+
+/**
+ * is_read_empty - Returns true if no data is available in log
+ *
+ * @ilctxt: logging context
+ * @returns: 1 if context is empty; 0 if not empty; <0 for failure
+ *
+ * This is for the actual log contents. If it is empty, then there
+ * is no data at all in the log.
+ */
+static int is_read_empty(struct ipc_log_context *ilctxt)
+{
+ if (!ilctxt)
+ return -EINVAL;
+
+ return ((ilctxt->read_page == ilctxt->write_page) &&
+ (ilctxt->read_page->hdr.read_offset ==
+ ilctxt->write_page->hdr.write_offset));
+}
+
+/**
+ * is_nd_read_equal_read - Return true if the non-destructive read is equal to
+ * the destructive read
+ *
+ * @ilctxt: logging context
+ * @returns: true if nd read is equal to read; false otherwise
+ */
+static bool is_nd_read_equal_read(struct ipc_log_context *ilctxt)
+{
+ uint16_t read_offset;
+ uint16_t nd_read_offset;
+
+ if (ilctxt->nd_read_page == ilctxt->read_page) {
+ read_offset = ilctxt->read_page->hdr.read_offset;
+ nd_read_offset = ilctxt->nd_read_page->hdr.nd_read_offset;
+
+ if (read_offset == nd_read_offset)
+ return true;
+ }
+
+ return false;
+}
+
+
+static struct ipc_log_page *get_next_page(struct ipc_log_context *ilctxt,
+ struct ipc_log_page *cur_pg)
+{
+ struct ipc_log_page_header *p_pghdr;
+ struct ipc_log_page *pg = NULL;
+
+ if (!ilctxt || !cur_pg)
+ return NULL;
+
+ if (ilctxt->last_page == cur_pg)
+ return ilctxt->first_page;
+
+ p_pghdr = list_first_entry(&cur_pg->hdr.list,
+ struct ipc_log_page_header, list);
+ pg = container_of(p_pghdr, struct ipc_log_page, hdr);
+
+ return pg;
+}
+
+/**
+ * ipc_log_read - do non-destructive read of the log
+ *
+ * @ilctxt: Logging context
+ * @data: Data pointer to receive the data
+ * @data_size: Number of bytes to read (must be <= bytes available in log)
+ *
+ * This read updates a runtime read pointer but does not affect the actual
+ * contents of the log, so the logs can be read continuously while
+ * debugging; if the system crashes, the full logs can still be
+ * extracted.
+ */
+static void ipc_log_read(struct ipc_log_context *ilctxt,
+ void *data, int data_size)
+{
+ int bytes_to_read;
+
+ bytes_to_read = MIN(LOG_PAGE_DATA_SIZE
+ - ilctxt->nd_read_page->hdr.nd_read_offset,
+ data_size);
+
+ memcpy(data, (ilctxt->nd_read_page->data +
+ ilctxt->nd_read_page->hdr.nd_read_offset), bytes_to_read);
+
+ if (bytes_to_read != data_size) {
+ /* not enough space, wrap read to next page */
+ ilctxt->nd_read_page->hdr.nd_read_offset = 0;
+ ilctxt->nd_read_page = get_next_page(ilctxt,
+ ilctxt->nd_read_page);
+ BUG_ON(ilctxt->nd_read_page == NULL);
+
+ memcpy((data + bytes_to_read),
+ (ilctxt->nd_read_page->data +
+ ilctxt->nd_read_page->hdr.nd_read_offset),
+ (data_size - bytes_to_read));
+ bytes_to_read = (data_size - bytes_to_read);
+ }
+ ilctxt->nd_read_page->hdr.nd_read_offset += bytes_to_read;
+}
+
+/**
+ * ipc_log_drop - do destructive read of the log
+ *
+ * @ilctxt: Logging context
+ * @data: Data pointer to receive the data (or NULL)
+ * @data_size: Number of bytes to read (must be <= bytes available in log)
+ */
+static void ipc_log_drop(struct ipc_log_context *ilctxt, void *data,
+ int data_size)
+{
+ int bytes_to_read;
+ bool push_nd_read;
+
+ bytes_to_read = MIN(LOG_PAGE_DATA_SIZE
+ - ilctxt->read_page->hdr.read_offset,
+ data_size);
+ if (data)
+ memcpy(data, (ilctxt->read_page->data +
+ ilctxt->read_page->hdr.read_offset), bytes_to_read);
+
+ if (bytes_to_read != data_size) {
+ /* not enough space, wrap read to next page */
+ push_nd_read = is_nd_read_equal_read(ilctxt);
+
+ ilctxt->read_page->hdr.read_offset = 0;
+ if (push_nd_read) {
+ ilctxt->read_page->hdr.nd_read_offset = 0;
+ ilctxt->read_page = get_next_page(ilctxt,
+ ilctxt->read_page);
+ BUG_ON(ilctxt->read_page == NULL);
+ ilctxt->nd_read_page = ilctxt->read_page;
+ } else {
+ ilctxt->read_page = get_next_page(ilctxt,
+ ilctxt->read_page);
+ BUG_ON(ilctxt->read_page == NULL);
+ }
+
+ if (data)
+ memcpy((data + bytes_to_read),
+ (ilctxt->read_page->data +
+ ilctxt->read_page->hdr.read_offset),
+ (data_size - bytes_to_read));
+
+ bytes_to_read = (data_size - bytes_to_read);
+ }
+
+ /* update non-destructive read pointer if necessary */
+ push_nd_read = is_nd_read_equal_read(ilctxt);
+ ilctxt->read_page->hdr.read_offset += bytes_to_read;
+ ilctxt->write_avail += data_size;
+
+ if (push_nd_read)
+ ilctxt->nd_read_page->hdr.nd_read_offset += bytes_to_read;
+}
+
+/**
+ * msg_read - Reads a message.
+ *
+ * If a message is read successfully, then the message context
+ * will be set to:
+ * .hdr message header .size and .type values
+ * .offset beginning of message data
+ *
+ * @ilctxt Logging context
+ * @ectxt Message context
+ *
+ * @returns 0 - no message available; >0 message size; <0 error
+ */
+static int msg_read(struct ipc_log_context *ilctxt,
+ struct encode_context *ectxt)
+{
+ struct tsv_header hdr;
+
+ if (!ectxt)
+ return -EINVAL;
+
+ if (is_nd_read_empty(ilctxt))
+ return 0;
+
+ ipc_log_read(ilctxt, &hdr, sizeof(hdr));
+ ectxt->hdr.type = hdr.type;
+ ectxt->hdr.size = hdr.size;
+ ectxt->offset = sizeof(hdr);
+ ipc_log_read(ilctxt, (ectxt->buff + ectxt->offset),
+ (int)hdr.size);
+
+ return sizeof(hdr) + (int)hdr.size;
+}
+
+/**
+ * msg_drop - Drops a message.
+ *
+ * @ilctxt Logging context
+ */
+static void msg_drop(struct ipc_log_context *ilctxt)
+{
+ struct tsv_header hdr;
+
+ if (!is_read_empty(ilctxt)) {
+ ipc_log_drop(ilctxt, &hdr, sizeof(hdr));
+ ipc_log_drop(ilctxt, NULL, (int)hdr.size);
+ }
+}
+
+/*
+ * Commits messages to the FIFO. If the FIFO is full, then enough
+ * messages are dropped to create space for the new message.
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt)
+{
+ struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+ int bytes_to_write;
+ unsigned long flags;
+
+ if (!ilctxt || !ectxt) {
+ pr_err("%s: Invalid ipc_log or encode context\n", __func__);
+ return;
+ }
+
+ read_lock_irqsave(&context_list_lock_lha1, flags);
+ spin_lock(&ilctxt->context_lock_lhb1);
+ while (ilctxt->write_avail <= ectxt->offset)
+ msg_drop(ilctxt);
+
+ bytes_to_write = MIN(LOG_PAGE_DATA_SIZE
+ - ilctxt->write_page->hdr.write_offset,
+ ectxt->offset);
+ memcpy((ilctxt->write_page->data +
+ ilctxt->write_page->hdr.write_offset),
+ ectxt->buff, bytes_to_write);
+
+ if (bytes_to_write != ectxt->offset) {
+ uint64_t t_now = sched_clock();
+
+ ilctxt->write_page->hdr.write_offset += bytes_to_write;
+ ilctxt->write_page->hdr.end_time = t_now;
+
+ ilctxt->write_page = get_next_page(ilctxt, ilctxt->write_page);
+ BUG_ON(ilctxt->write_page == NULL);
+ ilctxt->write_page->hdr.write_offset = 0;
+ ilctxt->write_page->hdr.start_time = t_now;
+ memcpy((ilctxt->write_page->data +
+ ilctxt->write_page->hdr.write_offset),
+ (ectxt->buff + bytes_to_write),
+ (ectxt->offset - bytes_to_write));
+ bytes_to_write = (ectxt->offset - bytes_to_write);
+ }
+ ilctxt->write_page->hdr.write_offset += bytes_to_write;
+ ilctxt->write_avail -= ectxt->offset;
+ complete(&ilctxt->read_avail);
+ spin_unlock(&ilctxt->context_lock_lhb1);
+ read_unlock_irqrestore(&context_list_lock_lha1, flags);
+}
+EXPORT_SYMBOL(ipc_log_write);
+
+/*
+ * Starts a new message after which you can add serialized data and
+ * then complete the message by calling msg_encode_end().
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type)
+{
+ if (!ectxt) {
+ pr_err("%s: Invalid encode context\n", __func__);
+ return;
+ }
+
+ ectxt->hdr.type = type;
+ ectxt->hdr.size = 0;
+ ectxt->offset = sizeof(ectxt->hdr);
+}
+EXPORT_SYMBOL(msg_encode_start);
+
+/*
+ * Completes the message
+ */
+void msg_encode_end(struct encode_context *ectxt)
+{
+ if (!ectxt) {
+ pr_err("%s: Invalid encode context\n", __func__);
+ return;
+ }
+
+ /* finalize data size */
+ ectxt->hdr.size = ectxt->offset - sizeof(ectxt->hdr);
+ BUG_ON(ectxt->hdr.size > MAX_MSG_SIZE);
+ memcpy(ectxt->buff, &ectxt->hdr, sizeof(ectxt->hdr));
+}
+EXPORT_SYMBOL(msg_encode_end);
+
+/*
+ * Helper function used to write data to a message context.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @data data to write
+ * @size number of bytes of data to write
+ */
+static inline int tsv_write_data(struct encode_context *ectxt,
+ void *data, uint32_t size)
+{
+ if (!ectxt) {
+ pr_err("%s: Invalid encode context\n", __func__);
+ return -EINVAL;
+ }
+ if ((ectxt->offset + size) > MAX_MSG_SIZE) {
+ pr_err("%s: No space to encode further\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy((void *)(ectxt->buff + ectxt->offset), data, size);
+ ectxt->offset += size;
+ return 0;
+}
+
+/*
+ * Helper function that writes a type to the context.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @type primitive type
+ * @size size of primitive in bytes
+ */
+static inline int tsv_write_header(struct encode_context *ectxt,
+ uint32_t type, uint32_t size)
+{
+ struct tsv_header hdr;
+
+ hdr.type = (unsigned char)type;
+ hdr.size = (unsigned char)size;
+ return tsv_write_data(ectxt, &hdr, sizeof(hdr));
+}
+
+/*
+ * Writes the current timestamp count.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt)
+{
+ int ret;
+ uint64_t t_now = sched_clock();
+
+ ret = tsv_write_header(ectxt, TSV_TYPE_TIMESTAMP, sizeof(t_now));
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, &t_now, sizeof(t_now));
+}
+EXPORT_SYMBOL(tsv_timestamp_write);
+
+/*
+ * Writes the current QTimer timestamp count.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ */
+int tsv_qtimer_write(struct encode_context *ectxt)
+{
+ int ret;
+ uint64_t t_now = arch_counter_get_cntvct();
+
+ ret = tsv_write_header(ectxt, TSV_TYPE_QTIMER, sizeof(t_now));
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, &t_now, sizeof(t_now));
+}
+EXPORT_SYMBOL(tsv_qtimer_write);
+
+/*
+ * Writes a data pointer.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @pointer pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{
+ int ret;
+ ret = tsv_write_header(ectxt, TSV_TYPE_POINTER, sizeof(pointer));
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, &pointer, sizeof(pointer));
+}
+EXPORT_SYMBOL(tsv_pointer_write);
+
+/*
+ * Writes a 32-bit integer value.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @n integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{
+ int ret;
+ ret = tsv_write_header(ectxt, TSV_TYPE_INT32, sizeof(n));
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, &n, sizeof(n));
+}
+EXPORT_SYMBOL(tsv_int32_write);
+
+/*
+ * Writes a byte array.
+ *
+ * @ectxt context initialized by calling msg_write_start()
+ * @data Beginning address of data
+ * @data_size Size of data to be written
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+ void *data, int data_size)
+{
+ int ret;
+ ret = tsv_write_header(ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, data, data_size);
+}
+EXPORT_SYMBOL(tsv_byte_array_write);
+
+/*
+ * Helper function to log a string
+ *
+ * @ilctxt ipc_log_context created using ipc_log_context_create()
+ * @fmt Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{
+ struct encode_context ectxt;
+ int avail_size, data_size, hdr_size = sizeof(struct tsv_header);
+ va_list arg_list;
+
+ if (!ilctxt)
+ return -EINVAL;
+
+ msg_encode_start(&ectxt, TSV_TYPE_STRING);
+ tsv_timestamp_write(&ectxt);
+ tsv_qtimer_write(&ectxt);
+ avail_size = (MAX_MSG_SIZE - (ectxt.offset + hdr_size));
+ va_start(arg_list, fmt);
+ data_size = vscnprintf((ectxt.buff + ectxt.offset + hdr_size),
+ avail_size, fmt, arg_list);
+ va_end(arg_list);
+ tsv_write_header(&ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
+ ectxt.offset += data_size;
+ msg_encode_end(&ectxt);
+ ipc_log_write(ilctxt, &ectxt);
+ return 0;
+}
+EXPORT_SYMBOL(ipc_log_string);
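A hedged usage sketch of the string-logging path, using only APIs defined in this patch; the module name, page count, and message are placeholders:

    /* Illustrative client (not part of this patch): allocate a
     * two-page log, write one formatted line, tear it down.
     * Requires <linux/ipc_logging.h>.
     */
    static void *drv_log;

    static int drv_init(void)
    {
            drv_log = ipc_log_context_create(2, "my_drv", 0);
            if (!drv_log)
                    return -ENOMEM;
            ipc_log_string(drv_log, "probe done, status=%d", 0);
            return 0;
    }

    static void drv_exit(void)
    {
            ipc_log_context_destroy(drv_log);
    }

The resulting records can then be pulled non-destructively through the per-context debugfs "log" node created in ipc_logging_debug.c below.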
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ctxt: logging context
+ * @buff: buffer to receive the data
+ * @size: size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized. This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ctxt, char *buff, int size)
+{
+ struct encode_context ectxt;
+ struct decode_context dctxt;
+ void (*deserialize_func)(struct encode_context *ectxt,
+ struct decode_context *dctxt);
+ struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+ unsigned long flags;
+
+ if (size < MAX_MSG_DECODED_SIZE)
+ return -EINVAL;
+
+ dctxt.output_format = OUTPUT_DEBUGFS;
+ dctxt.buff = buff;
+ dctxt.size = size;
+ read_lock_irqsave(&context_list_lock_lha1, flags);
+ spin_lock(&ilctxt->context_lock_lhb1);
+ while (dctxt.size >= MAX_MSG_DECODED_SIZE &&
+ !is_nd_read_empty(ilctxt)) {
+ msg_read(ilctxt, &ectxt);
+ deserialize_func = get_deserialization_func(ilctxt,
+ ectxt.hdr.type);
+ spin_unlock(&ilctxt->context_lock_lhb1);
+ read_unlock_irqrestore(&context_list_lock_lha1, flags);
+ if (deserialize_func)
+ deserialize_func(&ectxt, &dctxt);
+ else
+ pr_err("%s: unknown message 0x%x\n",
+ __func__, ectxt.hdr.type);
+ read_lock_irqsave(&context_list_lock_lha1, flags);
+ spin_lock(&ilctxt->context_lock_lhb1);
+ }
+ if ((size - dctxt.size) == 0)
+ reinit_completion(&ilctxt->read_avail);
+ spin_unlock(&ilctxt->context_lock_lhb1);
+ read_unlock_irqrestore(&context_list_lock_lha1, flags);
+ return size - dctxt.size;
+}
+EXPORT_SYMBOL(ipc_log_extract);
+
+/*
+ * Helper function used to read data from a message context.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @data data to read
+ * @size number of bytes of data to read
+ */
+static void tsv_read_data(struct encode_context *ectxt,
+ void *data, uint32_t size)
+{
+ BUG_ON((ectxt->offset + size) > MAX_MSG_SIZE);
+ memcpy(data, (ectxt->buff + ectxt->offset), size);
+ ectxt->offset += size;
+}
+
+/*
+ * Helper function that reads a type from the context and updates the
+ * context pointers.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @hdr type header
+ */
+static void tsv_read_header(struct encode_context *ectxt,
+ struct tsv_header *hdr)
+{
+ BUG_ON((ectxt->offset + sizeof(*hdr)) > MAX_MSG_SIZE);
+ memcpy(hdr, (ectxt->buff + ectxt->offset), sizeof(*hdr));
+ ectxt->offset += sizeof(*hdr);
+}
+
+/*
+ * Reads a timestamp.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format (appended to the %6u.%09lu timestamp)
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+ uint64_t val;
+ unsigned long nanosec_rem;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_TIMESTAMP);
+ tsv_read_data(ectxt, &val, sizeof(val));
+ nanosec_rem = do_div(val, 1000000000U);
+ IPC_SPRINTF_DECODE(dctxt, "[%6u.%09lu%s/",
+ (unsigned)val, nanosec_rem, format);
+}
+EXPORT_SYMBOL(tsv_timestamp_read);
+
+/*
+ * Reads a QTimer timestamp.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format (appended to %#18llx timestamp format)
+ */
+void tsv_qtimer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+ uint64_t val;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_QTIMER);
+ tsv_read_data(ectxt, &val, sizeof(val));
+
+ /*
+ * This gives 16 hex digits of output. The # prefix prepends
+ * a 0x, and these characters count as part of the number.
+ */
+ IPC_SPRINTF_DECODE(dctxt, "%#18llx]%s", val, format);
+}
+EXPORT_SYMBOL(tsv_qtimer_read);
+
+/*
+ * Reads a data pointer.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+ void *val;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_POINTER);
+ tsv_read_data(ectxt, &val, sizeof(val));
+
+ IPC_SPRINTF_DECODE(dctxt, format, val);
+}
+EXPORT_SYMBOL(tsv_pointer_read);
+
+/*
+ * Reads a 32-bit integer value.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+ int32_t val;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_INT32);
+ tsv_read_data(ectxt, &val, sizeof(val));
+
+ IPC_SPRINTF_DECODE(dctxt, format, val);
+ return val;
+}
+EXPORT_SYMBOL(tsv_int32_read);
+
+/*
+ * Reads a byte array/string.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_BYTE_ARRAY);
+ tsv_read_data(ectxt, dctxt->buff, hdr.size);
+ dctxt->buff += hdr.size;
+ dctxt->size -= hdr.size;
+}
+EXPORT_SYMBOL(tsv_byte_array_read);
+
+int add_deserialization_func(void *ctxt, int type,
+ void (*dfunc)(struct encode_context *,
+ struct decode_context *))
+{
+ struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+ struct dfunc_info *df_info;
+ unsigned long flags;
+
+ if (!ilctxt || !dfunc)
+ return -EINVAL;
+
+ df_info = kmalloc(sizeof(struct dfunc_info), GFP_KERNEL);
+ if (!df_info)
+ return -ENOSPC;
+
+ read_lock_irqsave(&context_list_lock_lha1, flags);
+ spin_lock(&ilctxt->context_lock_lhb1);
+ df_info->type = type;
+ df_info->dfunc = dfunc;
+ list_add_tail(&df_info->list, &ilctxt->dfunc_info_list);
+ spin_unlock(&ilctxt->context_lock_lhb1);
+ read_unlock_irqrestore(&context_list_lock_lha1, flags);
+ return 0;
+}
+EXPORT_SYMBOL(add_deserialization_func);
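To show how the TSV helpers compose, here is a hedged sketch of a client emitting a custom typed record and registering a matching decoder. TSV_TYPE_MY_EVENT, its value, and the decode format strings are invented for illustration:

    #define TSV_TYPE_MY_EVENT 0x20  /* hypothetical user-defined message type */

    /* Decoder: must read fields in the same order they were encoded. */
    static void my_event_decode(struct encode_context *ectxt,
                                struct decode_context *dctxt)
    {
            tsv_timestamp_read(ectxt, dctxt, "");
            tsv_int32_read(ectxt, dctxt, " code=%d\n");
    }

    /* Encoder: timestamp followed by one 32-bit payload. */
    static void my_event_log(void *ctxt, int32_t code)
    {
            struct encode_context ectxt;

            msg_encode_start(&ectxt, TSV_TYPE_MY_EVENT);
            tsv_timestamp_write(&ectxt);
            tsv_int32_write(&ectxt, code);
            msg_encode_end(&ectxt);
            ipc_log_write(ctxt, &ectxt);
    }

    /* once, after ipc_log_context_create():
     *   add_deserialization_func(ctxt, TSV_TYPE_MY_EVENT, my_event_decode);
     */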
+
+static void *get_deserialization_func(struct ipc_log_context *ilctxt,
+ int type)
+{
+ struct dfunc_info *df_info = NULL;
+
+ if (!ilctxt)
+ return NULL;
+
+ list_for_each_entry(df_info, &ilctxt->dfunc_info_list, list) {
+ if (df_info->type == type)
+ return df_info->dfunc;
+ }
+ return NULL;
+}
+
+/**
+ * ipc_log_context_create: Create a debug log context
+ * Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @mod_name: Name of the directory entry under DEBUGFS
+ * @user_version : Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages,
+ const char *mod_name, uint16_t user_version)
+{
+ struct ipc_log_context *ctxt;
+ struct ipc_log_page *pg = NULL;
+ int page_cnt;
+ unsigned long flags;
+
+ ctxt = kzalloc(sizeof(struct ipc_log_context), GFP_KERNEL);
+ if (!ctxt) {
+ pr_err("%s: cannot create ipc_log_context\n", __func__);
+		return NULL;
+ }
+
+ init_completion(&ctxt->read_avail);
+ INIT_LIST_HEAD(&ctxt->page_list);
+ INIT_LIST_HEAD(&ctxt->dfunc_info_list);
+ spin_lock_init(&ctxt->context_lock_lhb1);
+ for (page_cnt = 0; page_cnt < max_num_pages; page_cnt++) {
+ pg = kzalloc(sizeof(struct ipc_log_page), GFP_KERNEL);
+ if (!pg) {
+ pr_err("%s: cannot create ipc_log_page\n", __func__);
+ goto release_ipc_log_context;
+ }
+ pg->hdr.log_id = (uint64_t)(uintptr_t)ctxt;
+ pg->hdr.page_num = LOG_PAGE_FLAG | page_cnt;
+ pg->hdr.ctx_offset = (int64_t)((uint64_t)(uintptr_t)ctxt -
+ (uint64_t)(uintptr_t)&pg->hdr);
+
+ /* set magic last to signal that page init is complete */
+ pg->hdr.magic = IPC_LOGGING_MAGIC_NUM;
+ pg->hdr.nmagic = ~(IPC_LOGGING_MAGIC_NUM);
+
+ spin_lock_irqsave(&ctxt->context_lock_lhb1, flags);
+ list_add_tail(&pg->hdr.list, &ctxt->page_list);
+ spin_unlock_irqrestore(&ctxt->context_lock_lhb1, flags);
+ }
+
+ ctxt->log_id = (uint64_t)(uintptr_t)ctxt;
+ ctxt->version = IPC_LOG_VERSION;
+ strlcpy(ctxt->name, mod_name, IPC_LOG_MAX_CONTEXT_NAME_LEN);
+ ctxt->user_version = user_version;
+ ctxt->first_page = get_first_page(ctxt);
+ ctxt->last_page = pg;
+ ctxt->write_page = ctxt->first_page;
+ ctxt->read_page = ctxt->first_page;
+ ctxt->nd_read_page = ctxt->first_page;
+ ctxt->write_avail = max_num_pages * LOG_PAGE_DATA_SIZE;
+ ctxt->header_size = sizeof(struct ipc_log_page_header);
+ create_ctx_debugfs(ctxt, mod_name);
+
+ /* set magic last to signal context init is complete */
+ ctxt->magic = IPC_LOG_CONTEXT_MAGIC_NUM;
+ ctxt->nmagic = ~(IPC_LOG_CONTEXT_MAGIC_NUM);
+
+ write_lock_irqsave(&context_list_lock_lha1, flags);
+ list_add_tail(&ctxt->list, &ipc_log_context_list);
+ write_unlock_irqrestore(&context_list_lock_lha1, flags);
+ return (void *)ctxt;
+
+release_ipc_log_context:
+ while (page_cnt-- > 0) {
+ pg = get_first_page(ctxt);
+ list_del(&pg->hdr.list);
+ kfree(pg);
+ }
+ kfree(ctxt);
+	return NULL;
+}
+EXPORT_SYMBOL(ipc_log_context_create);
+
+/*
+ * Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt)
+{
+ struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+ struct ipc_log_page *pg = NULL;
+ unsigned long flags;
+
+ if (!ilctxt)
+ return 0;
+
+ while (!list_empty(&ilctxt->page_list)) {
+ pg = get_first_page(ctxt);
+ list_del(&pg->hdr.list);
+ kfree(pg);
+ }
+
+ write_lock_irqsave(&context_list_lock_lha1, flags);
+ list_del(&ilctxt->list);
+ write_unlock_irqrestore(&context_list_lock_lha1, flags);
+
+ debugfs_remove_recursive(ilctxt->dent);
+
+ kfree(ilctxt);
+ return 0;
+}
+EXPORT_SYMBOL(ipc_log_context_destroy);
+
+static int __init ipc_logging_init(void)
+{
+ check_and_create_debugfs();
+ return 0;
+}
+
+module_init(ipc_logging_init);
+
+MODULE_DESCRIPTION("ipc logging");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/trace/ipc_logging_debug.c b/kernel/trace/ipc_logging_debug.c
new file mode 100644
index 000000000000..a54538798f2b
--- /dev/null
+++ b/kernel/trace/ipc_logging_debug.c
@@ -0,0 +1,184 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/ipc_logging.h>
+
+#include "ipc_logging_private.h"
+
+static DEFINE_MUTEX(ipc_log_debugfs_init_lock);
+static struct dentry *root_dent;
+
+static int debug_log(struct ipc_log_context *ilctxt,
+ char *buff, int size, int cont)
+{
+ int i = 0;
+ int ret;
+
+ if (size < MAX_MSG_DECODED_SIZE) {
+ pr_err("%s: buffer size %d < %d\n", __func__, size,
+ MAX_MSG_DECODED_SIZE);
+ return -ENOMEM;
+ }
+ do {
+ i = ipc_log_extract(ilctxt, buff, size - 1);
+ if (cont && i == 0) {
+ ret = wait_for_completion_interruptible(
+ &ilctxt->read_avail);
+ if (ret < 0)
+ return ret;
+ }
+ } while (cont && i == 0);
+
+ return i;
+}
+
+/*
+ * VFS Read operation helper which dispatches the call to the debugfs
+ * read command stored in file->private_data.
+ *
+ * @file File structure
+ * @buff user buffer
+ * @count size of user buffer
+ * @ppos file position to read from (only a value of 0 is accepted)
+ * @cont 1 = continuous mode (don't return 0 to signal end-of-file)
+ *
+ * @returns ==0 end of file
+ * >0 number of bytes read
+ * <0 error
+ */
+static ssize_t debug_read_helper(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos, int cont)
+{
+ struct ipc_log_context *ilctxt = file->private_data;
+ char *buffer;
+ int bsize;
+
+ buffer = kmalloc(count, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ bsize = debug_log(ilctxt, buffer, count, cont);
+ if (bsize > 0) {
+ if (copy_to_user(buff, buffer, bsize)) {
+ kfree(buffer);
+ return -EFAULT;
+ }
+ *ppos += bsize;
+ }
+ kfree(buffer);
+ return bsize;
+}
+
+static ssize_t debug_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ return debug_read_helper(file, buff, count, ppos, 0);
+}
+
+static ssize_t debug_read_cont(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ return debug_read_helper(file, buff, count, ppos, 1);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static const struct file_operations debug_ops_cont = {
+ .read = debug_read_cont,
+ .open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+ struct dentry *dent,
+ struct ipc_log_context *ilctxt,
+ const struct file_operations *fops)
+{
+ debugfs_create_file(name, mode, dent, ilctxt, fops);
+}
+
+static void dfunc_string(struct encode_context *ectxt,
+ struct decode_context *dctxt)
+{
+ tsv_timestamp_read(ectxt, dctxt, "");
+ tsv_qtimer_read(ectxt, dctxt, " ");
+ tsv_byte_array_read(ectxt, dctxt, "");
+
+ /* add trailing \n if necessary */
+ if (*(dctxt->buff - 1) != '\n') {
+ if (dctxt->size) {
+ ++dctxt->buff;
+ --dctxt->size;
+ }
+ *(dctxt->buff - 1) = '\n';
+ }
+}
+
+void check_and_create_debugfs(void)
+{
+ mutex_lock(&ipc_log_debugfs_init_lock);
+ if (!root_dent) {
+ root_dent = debugfs_create_dir("ipc_logging", 0);
+
+ if (IS_ERR(root_dent)) {
+ pr_err("%s: unable to create debugfs %ld\n",
+ __func__, PTR_ERR(root_dent));
+ root_dent = NULL;
+ }
+ }
+ mutex_unlock(&ipc_log_debugfs_init_lock);
+}
+EXPORT_SYMBOL(check_and_create_debugfs);
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt,
+ const char *mod_name)
+{
+ if (!root_dent)
+ check_and_create_debugfs();
+
+ if (root_dent) {
+ ctxt->dent = debugfs_create_dir(mod_name, root_dent);
+ if (!IS_ERR(ctxt->dent)) {
+ debug_create("log", 0444, ctxt->dent,
+ ctxt, &debug_ops);
+ debug_create("log_cont", 0444, ctxt->dent,
+ ctxt, &debug_ops_cont);
+ }
+ }
+ add_deserialization_func((void *)ctxt,
+ TSV_TYPE_STRING, dfunc_string);
+}
+EXPORT_SYMBOL(create_ctx_debugfs);
diff --git a/kernel/trace/ipc_logging_private.h b/kernel/trace/ipc_logging_private.h
new file mode 100644
index 000000000000..3ac950695086
--- /dev/null
+++ b/kernel/trace/ipc_logging_private.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _IPC_LOGGING_PRIVATE_H
+#define _IPC_LOGGING_PRIVATE_H
+
+#include <linux/ipc_logging.h>
+
+#define IPC_LOG_VERSION 0x0003
+#define IPC_LOG_MAX_CONTEXT_NAME_LEN 32
+
+/**
+ * struct ipc_log_page_header - Individual log page header
+ *
+ * @magic: Magic number (used for log extraction)
+ * @nmagic: Inverse of magic number (used for log extraction)
+ * @page_num: Index of page (0.. N - 1) (note top bit is always set)
+ * @read_offset: Read offset in page
+ * @write_offset: Write offset in page (or 0xFFFF if full)
+ * @log_id: ID of logging context that owns this page
+ * @start_time: Scheduler clock for first write time in page
+ * @end_time: Scheduler clock for last write time in page
+ * @ctx_offset: Signed offset from page to the logging context. Used to
+ * optimize ram-dump extraction.
+ *
+ * @list: Linked list of pages that make up a log
+ * @nd_read_offset: Non-destructive read offset used for debugfs
+ *
+ * The first part of the structure defines data that is used to extract the
+ * logs from a memory dump and elements in this section should not be changed
+ * or re-ordered. New local data structures can be added to the end of the
+ * structure since they will be ignored by the extraction tool.
+ */
+struct ipc_log_page_header {
+ uint32_t magic;
+ uint32_t nmagic;
+ uint32_t page_num;
+ uint16_t read_offset;
+ uint16_t write_offset;
+ uint64_t log_id;
+ uint64_t start_time;
+ uint64_t end_time;
+ int64_t ctx_offset;
+
+ /* add local data structures after this point */
+ struct list_head list;
+ uint16_t nd_read_offset;
+};
+
+/**
+ * struct ipc_log_page - Individual log page
+ *
+ * @hdr: Log page header
+ * @data: Log data
+ *
+ * Each log consists of 1 to N log pages. Data size is adjusted to always fit
+ * the structure into a single kernel page.
+ */
+struct ipc_log_page {
+ struct ipc_log_page_header hdr;
+ char data[PAGE_SIZE - sizeof(struct ipc_log_page_header)];
+};
+
+/**
+ * struct ipc_log_context - main logging context
+ *
+ * @magic: Magic number (used for log extraction)
+ * @nmagic: Inverse of magic number (used for log extraction)
+ * @version: IPC Logging version of log format
+ * @user_version: Version number for user-defined messages
+ * @header_size: Size of the log header which is used to determine the offset
+ * of ipc_log_page::data
+ * @log_id: Log ID (assigned when log is created)
+ * @name: Name of the log used to uniquely identify the log during extraction
+ *
+ * @list: List of log contexts (struct ipc_log_context)
+ * @page_list: List of log pages (struct ipc_log_page)
+ * @first_page: First page in list of logging pages
+ * @last_page: Last page in list of logging pages
+ * @write_page: Current write page
+ * @read_page: Current read page (for internal reads)
+ * @nd_read_page: Current debugfs extraction page (non-destructive)
+ *
+ * @write_avail: Number of bytes available to write in all pages
+ * @dent: Debugfs node for run-time log extraction
+ * @dfunc_info_list: List of deserialization functions
+ * @context_lock_lhb1: Lock for entire structure
+ * @read_avail: Completed when new data is added to the log
+ */
+struct ipc_log_context {
+ uint32_t magic;
+ uint32_t nmagic;
+ uint32_t version;
+ uint16_t user_version;
+ uint16_t header_size;
+ uint64_t log_id;
+ char name[IPC_LOG_MAX_CONTEXT_NAME_LEN];
+
+ /* add local data structures after this point */
+ struct list_head list;
+ struct list_head page_list;
+ struct ipc_log_page *first_page;
+ struct ipc_log_page *last_page;
+ struct ipc_log_page *write_page;
+ struct ipc_log_page *read_page;
+ struct ipc_log_page *nd_read_page;
+
+ uint32_t write_avail;
+ struct dentry *dent;
+ struct list_head dfunc_info_list;
+ spinlock_t context_lock_lhb1;
+ struct completion read_avail;
+};
+
+struct dfunc_info {
+ struct list_head list;
+ int type;
+ void (*dfunc) (struct encode_context *, struct decode_context *);
+};
+
+enum {
+ TSV_TYPE_INVALID,
+ TSV_TYPE_TIMESTAMP,
+ TSV_TYPE_POINTER,
+ TSV_TYPE_INT32,
+ TSV_TYPE_BYTE_ARRAY,
+ TSV_TYPE_QTIMER,
+};
+
+enum {
+ OUTPUT_DEBUGFS,
+};
+
+#define IPC_LOG_CONTEXT_MAGIC_NUM 0x25874452
+#define IPC_LOGGING_MAGIC_NUM 0x52784425
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#define IS_MSG_TYPE(x) (((x) > TSV_TYPE_MSG_START) && \
+ ((x) < TSV_TYPE_MSG_END))
+#define MAX_MSG_DECODED_SIZE (MAX_MSG_SIZE*4)
+
+#if (defined(CONFIG_DEBUG_FS))
+void check_and_create_debugfs(void);
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt,
+ const char *mod_name);
+#else
+static inline void check_and_create_debugfs(void)
+{
+}
+
+static inline void create_ctx_debugfs(struct ipc_log_context *ctxt,
+				       const char *mod_name)
+{
+}
+#endif
+
+#endif
diff --git a/kernel/trace/msm_rtb.c b/kernel/trace/msm_rtb.c
new file mode 100644
index 000000000000..587082117842
--- /dev/null
+++ b/kernel/trace/msm_rtb.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <asm-generic/sizes.h>
+#include <linux/msm_rtb.h>
+#include <asm/timex.h>
+#include <soc/qcom/minidump.h>
+
+#define SENTINEL_BYTE_1 0xFF
+#define SENTINEL_BYTE_2 0xAA
+#define SENTINEL_BYTE_3 0xFF
+
+#define RTB_COMPAT_STR "qcom,msm-rtb"
+
+/* Write
+ * 1) 3 bytes of sentinel
+ * 2) 1 byte of log type
+ * 3) 4 bytes of index
+ * 4) 8 bytes of where the caller came from
+ * 5) 8 bytes of extra data from the caller
+ * 6) 8 bytes of timestamp
+ * 7) 8 bytes of cycle count
+ *
+ * Total = 40 bytes.
+ */
+struct msm_rtb_layout {
+ unsigned char sentinel[3];
+ unsigned char log_type;
+ uint32_t idx;
+ uint64_t caller;
+ uint64_t data;
+ uint64_t timestamp;
+ uint64_t cycle_count;
+} __attribute__ ((__packed__));
+
+
+struct msm_rtb_state {
+ struct msm_rtb_layout *rtb;
+ phys_addr_t phys;
+ int nentries;
+ int size;
+ int enabled;
+ int initialized;
+ uint32_t filter;
+ int step_size;
+};
+
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+DEFINE_PER_CPU(atomic_t, msm_rtb_idx_cpu);
+#else
+static atomic_t msm_rtb_idx;
+#endif
+
+static struct msm_rtb_state msm_rtb = {
+ .filter = 1 << LOGK_LOGBUF,
+ .enabled = 1,
+};
+
+module_param_named(filter, msm_rtb.filter, uint, 0644);
+module_param_named(enable, msm_rtb.enabled, int, 0644);
+
+static int msm_rtb_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ msm_rtb.enabled = 0;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_rtb_panic_blk = {
+ .notifier_call = msm_rtb_panic_notifier,
+ .priority = INT_MAX,
+};
+
+int notrace msm_rtb_event_should_log(enum logk_event_type log_type)
+{
+ return msm_rtb.initialized && msm_rtb.enabled &&
+ ((1 << (log_type & ~LOGTYPE_NOPC)) & msm_rtb.filter);
+}
+EXPORT_SYMBOL(msm_rtb_event_should_log);
+
+static void msm_rtb_emit_sentinel(struct msm_rtb_layout *start)
+{
+ start->sentinel[0] = SENTINEL_BYTE_1;
+ start->sentinel[1] = SENTINEL_BYTE_2;
+ start->sentinel[2] = SENTINEL_BYTE_3;
+}
+
+static void msm_rtb_write_type(enum logk_event_type log_type,
+ struct msm_rtb_layout *start)
+{
+ start->log_type = (char)log_type;
+}
+
+static void msm_rtb_write_caller(uint64_t caller, struct msm_rtb_layout *start)
+{
+ start->caller = caller;
+}
+
+static void msm_rtb_write_idx(uint32_t idx,
+ struct msm_rtb_layout *start)
+{
+ start->idx = idx;
+}
+
+static void msm_rtb_write_data(uint64_t data, struct msm_rtb_layout *start)
+{
+ start->data = data;
+}
+
+static void msm_rtb_write_timestamp(struct msm_rtb_layout *start)
+{
+ start->timestamp = sched_clock();
+}
+
+static void msm_rtb_write_cyclecount(struct msm_rtb_layout *start)
+{
+ start->cycle_count = get_cycles();
+}
+
+static void uncached_logk_pc_idx(enum logk_event_type log_type, uint64_t caller,
+ uint64_t data, int idx)
+{
+ struct msm_rtb_layout *start;
+
+ start = &msm_rtb.rtb[idx & (msm_rtb.nentries - 1)];
+
+ msm_rtb_emit_sentinel(start);
+ msm_rtb_write_type(log_type, start);
+ msm_rtb_write_caller(caller, start);
+ msm_rtb_write_idx(idx, start);
+ msm_rtb_write_data(data, start);
+ msm_rtb_write_timestamp(start);
+ msm_rtb_write_cyclecount(start);
+ mb();
+
+ return;
+}
+
+static void uncached_logk_timestamp(int idx)
+{
+ unsigned long long timestamp;
+
+ timestamp = sched_clock();
+ uncached_logk_pc_idx(LOGK_TIMESTAMP|LOGTYPE_NOPC,
+ (uint64_t)lower_32_bits(timestamp),
+ (uint64_t)upper_32_bits(timestamp), idx);
+}
+
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+static int msm_rtb_get_idx(void)
+{
+ int cpu, i, offset;
+ atomic_t *index;
+
+ /*
+ * ideally we would use get_cpu but this is a close enough
+ * approximation for our purposes.
+ */
+ cpu = raw_smp_processor_id();
+
+ index = &per_cpu(msm_rtb_idx_cpu, cpu);
+
+ i = atomic_add_return(msm_rtb.step_size, index);
+ i -= msm_rtb.step_size;
+
+ /* Check if index has wrapped around */
+ offset = (i & (msm_rtb.nentries - 1)) -
+ ((i - msm_rtb.step_size) & (msm_rtb.nentries - 1));
+ if (offset < 0) {
+ uncached_logk_timestamp(i);
+ i = atomic_add_return(msm_rtb.step_size, index);
+ i -= msm_rtb.step_size;
+ }
+
+ return i;
+}
+#else
+static int msm_rtb_get_idx(void)
+{
+ int i, offset;
+
+ i = atomic_inc_return(&msm_rtb_idx);
+ i--;
+
+ /* Check if index has wrapped around */
+ offset = (i & (msm_rtb.nentries - 1)) -
+ ((i - 1) & (msm_rtb.nentries - 1));
+ if (offset < 0) {
+ uncached_logk_timestamp(i);
+ i = atomic_inc_return(&msm_rtb_idx);
+ i--;
+ }
+
+ return i;
+}
+#endif
+
+int notrace uncached_logk_pc(enum logk_event_type log_type, void *caller,
+ void *data)
+{
+ int i;
+
+ if (!msm_rtb_event_should_log(log_type))
+ return 0;
+
+ i = msm_rtb_get_idx();
+ uncached_logk_pc_idx(log_type, (uint64_t)((unsigned long) caller),
+ (uint64_t)((unsigned long) data), i);
+
+ return 1;
+}
+EXPORT_SYMBOL(uncached_logk_pc);
+
+noinline int notrace uncached_logk(enum logk_event_type log_type, void *data)
+{
+ return uncached_logk_pc(log_type, __builtin_return_address(0), data);
+}
+EXPORT_SYMBOL(uncached_logk);
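A hedged sketch of a typical RTB call site, using only the functions exported above; LOGK_READL is assumed to be a member of enum logk_event_type in <linux/msm_rtb.h>:

    /* Illustrative: record a register read in the uncached ring
     * before performing it. uncached_logk() captures
     * __builtin_return_address(0), so the entry shows the caller.
     * Assumes <linux/io.h> and <linux/msm_rtb.h>.
     */
    static inline u32 traced_readl(void __iomem *addr)
    {
            uncached_logk(LOGK_READL, (void *)addr);
            return readl(addr);
    }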
+
+static int msm_rtb_probe(struct platform_device *pdev)
+{
+ struct msm_rtb_platform_data *d = pdev->dev.platform_data;
+ struct md_region md_entry;
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+ unsigned int cpu;
+#endif
+ int ret;
+
+ if (!pdev->dev.of_node) {
+ msm_rtb.size = d->size;
+ } else {
+ u64 size;
+ struct device_node *pnode;
+
+ pnode = of_parse_phandle(pdev->dev.of_node,
+ "linux,contiguous-region", 0);
+ if (pnode != NULL) {
+ const u32 *addr;
+
+ addr = of_get_address(pnode, 0, &size, NULL);
+ if (!addr) {
+ of_node_put(pnode);
+ return -EINVAL;
+ }
+ of_node_put(pnode);
+ } else {
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,rtb-size",
+ (u32 *)&size);
+ if (ret < 0)
+ return ret;
+
+ }
+
+ msm_rtb.size = size;
+ }
+
+ if (msm_rtb.size <= 0 || msm_rtb.size > SZ_1M)
+ return -EINVAL;
+
+ msm_rtb.rtb = dma_alloc_coherent(&pdev->dev, msm_rtb.size,
+ &msm_rtb.phys,
+ GFP_KERNEL);
+
+ if (!msm_rtb.rtb)
+ return -ENOMEM;
+
+ msm_rtb.nentries = msm_rtb.size / sizeof(struct msm_rtb_layout);
+
+ /* Round this down to a power of 2 */
+ msm_rtb.nentries = __rounddown_pow_of_two(msm_rtb.nentries);
+
+ memset(msm_rtb.rtb, 0, msm_rtb.size);
+
+ strlcpy(md_entry.name, "KRTB_BUF", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)msm_rtb.rtb;
+ md_entry.phys_addr = msm_rtb.phys;
+ md_entry.size = msm_rtb.size;
+ if (msm_minidump_add_region(&md_entry))
+ pr_info("Failed to add RTB in Minidump\n");
+
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+ for_each_possible_cpu(cpu) {
+ atomic_t *a = &per_cpu(msm_rtb_idx_cpu, cpu);
+ atomic_set(a, cpu);
+ }
+ msm_rtb.step_size = num_possible_cpus();
+#else
+ atomic_set(&msm_rtb_idx, 0);
+ msm_rtb.step_size = 1;
+#endif
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &msm_rtb_panic_blk);
+ msm_rtb.initialized = 1;
+ return 0;
+}
+
+static struct of_device_id msm_match_table[] = {
+ {.compatible = RTB_COMPAT_STR},
+ {},
+};
+
+static struct platform_driver msm_rtb_driver = {
+ .driver = {
+ .name = "msm_rtb",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_match_table
+ },
+};
+
+static int __init msm_rtb_init(void)
+{
+ return platform_driver_probe(&msm_rtb_driver, msm_rtb_probe);
+}
+
+static void __exit msm_rtb_exit(void)
+{
+ platform_driver_unregister(&msm_rtb_driver);
+}
+module_init(msm_rtb_init)
+module_exit(msm_rtb_exit)
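For the non-devicetree path that msm_rtb_probe() also supports (it falls back to pdev->dev.platform_data when there is no of_node), a board file could supply the buffer size roughly as follows; the SZ_1M value is a placeholder, chosen because it is the largest size the probe accepts. On devicetree systems the same information comes from a node with compatible "qcom,msm-rtb" and either a "linux,contiguous-region" phandle or a "qcom,rtb-size" property.

    /* Hypothetical board-file registration (non-DT path).
     * struct msm_rtb_platform_data comes from <linux/msm_rtb.h>.
     */
    static struct msm_rtb_platform_data rtb_pdata = {
            .size = SZ_1M,
    };

    static struct platform_device rtb_device = {
            .name = "msm_rtb",      /* must match the driver name */
            .id = -1,
            .dev = {
                    .platform_data = &rtb_pdata,
            },
    };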
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index eb4220a132ec..49fa2e6eea98 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -15,4 +15,3 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
-
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9d197b18e09c..a67f792fb950 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -42,6 +42,7 @@
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>
+#include <linux/coresight-stm.h>
#include "trace.h"
#include "trace_output.h"
@@ -574,8 +575,11 @@ int __trace_puts(unsigned long ip, const char *str, int size)
if (entry->buf[size - 1] != '\n') {
entry->buf[size] = '\n';
entry->buf[size + 1] = '\0';
- } else
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 2);
+ } else {
entry->buf[size] = '\0';
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 1);
+ }
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@ -616,6 +620,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->str = str;
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->str, strlen(entry->str)+1);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@ -1360,11 +1365,11 @@ void tracing_reset_all_online_cpus(void)
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
-static unsigned saved_tgids[SAVED_CMDLINES_DEFAULT];
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
unsigned *map_cmdline_to_pid;
+ unsigned *map_cmdline_to_tgid;
unsigned cmdline_num;
int cmdline_idx;
char *saved_cmdlines;
@@ -1395,12 +1400,23 @@ static int allocate_cmdlines_buffer(unsigned int val,
return -ENOMEM;
}
+ s->map_cmdline_to_tgid = kmalloc_array(val,
+ sizeof(*s->map_cmdline_to_tgid),
+ GFP_KERNEL);
+ if (!s->map_cmdline_to_tgid) {
+ kfree(s->map_cmdline_to_pid);
+ kfree(s->saved_cmdlines);
+ return -ENOMEM;
+ }
+
s->cmdline_idx = 0;
s->cmdline_num = val;
memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
sizeof(s->map_pid_to_cmdline));
memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
val * sizeof(*s->map_cmdline_to_pid));
+ memset(s->map_cmdline_to_tgid, NO_CMDLINE_MAP,
+ val * sizeof(*s->map_cmdline_to_tgid));
return 0;
}
@@ -1569,14 +1585,17 @@ static int trace_save_cmdline(struct task_struct *tsk)
tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
+ preempt_disable();
/*
* It's not the end of the world if we don't get
* the lock, but we also don't want to spin
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!arch_spin_trylock(&trace_cmdline_lock))
+ if (!arch_spin_trylock(&trace_cmdline_lock)) {
+ preempt_enable();
return 0;
+ }
idx = savedcmd->map_pid_to_cmdline[tpid];
if (idx == NO_CMDLINE_MAP) {
@@ -1588,8 +1607,9 @@ static int trace_save_cmdline(struct task_struct *tsk)
savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
set_cmdline(idx, tsk->comm);
- saved_tgids[idx] = tsk->tgid;
+ savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid;
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
return 1;
}
@@ -1632,19 +1652,29 @@ void trace_find_cmdline(int pid, char comm[])
preempt_enable();
}
-int trace_find_tgid(int pid)
+static int __find_tgid_locked(int pid)
{
unsigned map;
int tgid;
- preempt_disable();
- arch_spin_lock(&trace_cmdline_lock);
map = savedcmd->map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
- tgid = saved_tgids[map];
+ tgid = savedcmd->map_cmdline_to_tgid[map];
else
tgid = -1;
+ return tgid;
+}
+
+int trace_find_tgid(int pid)
+{
+ int tgid;
+
+ preempt_disable();
+ arch_spin_lock(&trace_cmdline_lock);
+
+ tgid = __find_tgid_locked(pid);
+
arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
@@ -2255,6 +2285,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, len + 1);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
}
@@ -3989,10 +4020,15 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
{
char buf[64];
int r;
+ unsigned int n;
+ preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
- r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
+ n = savedcmd->cmdline_num;
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
+
+ r = scnprintf(buf, sizeof(buf), "%u\n", n);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
@@ -4001,6 +4037,7 @@ static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
kfree(s->saved_cmdlines);
kfree(s->map_cmdline_to_pid);
+ kfree(s->map_cmdline_to_tgid);
kfree(s);
}
@@ -4017,10 +4054,12 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
return -ENOMEM;
}
+ preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
savedcmd_temp = savedcmd;
savedcmd = s;
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
free_saved_cmdlines_buffer(savedcmd_temp);
return 0;
@@ -4239,33 +4278,61 @@ tracing_saved_tgids_read(struct file *file, char __user *ubuf,
char *file_buf;
char *buf;
int len = 0;
- int pid;
int i;
+ int *pids;
+ int n = 0;
- file_buf = kmalloc(SAVED_CMDLINES_DEFAULT*(16+1+16), GFP_KERNEL);
- if (!file_buf)
- return -ENOMEM;
+ preempt_disable();
+ arch_spin_lock(&trace_cmdline_lock);
- buf = file_buf;
+ pids = kmalloc_array(savedcmd->cmdline_num, 2*sizeof(int), GFP_ATOMIC);
+ if (!pids) {
+ arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
+ return -ENOMEM;
+ }
- for (i = 0; i < SAVED_CMDLINES_DEFAULT; i++) {
- int tgid;
- int r;
+ for (i = 0; i < savedcmd->cmdline_num; i++) {
+ int pid;
pid = savedcmd->map_cmdline_to_pid[i];
if (pid == -1 || pid == NO_CMDLINE_MAP)
continue;
- tgid = trace_find_tgid(pid);
- r = sprintf(buf, "%d %d\n", pid, tgid);
+ pids[n] = pid;
+ pids[n+1] = __find_tgid_locked(pid);
+ n += 2;
+ }
+ arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
+
+ if (n == 0) {
+ kfree(pids);
+ return 0;
+ }
+
+ /* enough to hold each pid/tgid pair plus space, newline and NUL */
+ len = n * 12;
+ file_buf = kmalloc(len, GFP_KERNEL);
+ if (!file_buf) {
+ kfree(pids);
+ return -ENOMEM;
+ }
+
+ buf = file_buf;
+ for (i = 0; i < n && len > 0; i += 2) {
+ int r;
+
+ r = snprintf(buf, len, "%d %d\n", pids[i], pids[i+1]);
buf += r;
- len += r;
+ len -= r;
}
len = simple_read_from_buffer(ubuf, cnt, ppos,
- file_buf, len);
+ file_buf, buf - file_buf);
kfree(file_buf);
+ kfree(pids);
return len;
}
@@ -5296,8 +5363,11 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (entry->buf[cnt - 1] != '\n') {
entry->buf[cnt] = '\n';
entry->buf[cnt + 1] = '\0';
- } else
+ stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2);
+ } else {
entry->buf[cnt] = '\0';
+ stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1);
+ }
__buffer_unlock_commit(buffer, event);
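
A recurring point in the trace.c hunks above: arch_spin_lock() and arch_spin_trylock() are raw primitives that, unlike spin_lock(), do not disable preemption themselves, so every taker of trace_cmdline_lock now pins the CPU first and unpins it on every exit path. A minimal sketch of that discipline, with illustrative names only:

	static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

	static bool example_try_update(void)
	{
		preempt_disable();		/* arch_spin_* won't do this for us */
		if (!arch_spin_trylock(&example_lock)) {
			preempt_enable();	/* undo on the failure path too */
			return false;
		}
		/* ... touch state guarded by example_lock ... */
		arch_spin_unlock(&example_lock);
		preempt_enable();
		return true;
	}
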
diff --git a/kernel/trace/trace_cpu_freq_switch.c b/kernel/trace/trace_cpu_freq_switch.c
new file mode 100644
index 000000000000..f9dab6c4bb72
--- /dev/null
+++ b/kernel/trace/trace_cpu_freq_switch.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/hrtimer.h>
+#include <linux/tracefs.h>
+#include <linux/ktime.h>
+#include <trace/events/power.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+struct trans {
+ struct rb_node node;
+ unsigned int cpu;
+ unsigned int start_freq;
+ unsigned int end_freq;
+ unsigned int min_us;
+ unsigned int max_us;
+ ktime_t total_t;
+ unsigned int count;
+};
+static struct rb_root freq_trans_tree = RB_ROOT;
+
+static struct trans *tr_search(struct rb_root *root, unsigned int cpu,
+ unsigned int start_freq, unsigned int end_freq)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct trans *tr = container_of(node, struct trans, node);
+
+ if (cpu < tr->cpu)
+ node = node->rb_left;
+ else if (cpu > tr->cpu)
+ node = node->rb_right;
+ else if (start_freq < tr->start_freq)
+ node = node->rb_left;
+ else if (start_freq > tr->start_freq)
+ node = node->rb_right;
+ else if (end_freq < tr->end_freq)
+ node = node->rb_left;
+ else if (end_freq > tr->end_freq)
+ node = node->rb_right;
+ else
+ return tr;
+ }
+ return NULL;
+}
+
+static int tr_insert(struct rb_root *root, struct trans *tr)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ while (*new) {
+ struct trans *this = container_of(*new, struct trans, node);
+
+ parent = *new;
+ if (tr->cpu < this->cpu)
+ new = &((*new)->rb_left);
+ else if (tr->cpu > this->cpu)
+ new = &((*new)->rb_right);
+ else if (tr->start_freq < this->start_freq)
+ new = &((*new)->rb_left);
+ else if (tr->start_freq > this->start_freq)
+ new = &((*new)->rb_right);
+ else if (tr->end_freq < this->end_freq)
+ new = &((*new)->rb_left);
+ else if (tr->end_freq > this->end_freq)
+ new = &((*new)->rb_right);
+ else
+ return -EINVAL;
+ }
+
+ rb_link_node(&tr->node, parent, new);
+ rb_insert_color(&tr->node, root);
+
+ return 0;
+}
+
+struct trans_state {
+ spinlock_t lock;
+ unsigned int start_freq;
+ unsigned int end_freq;
+ ktime_t start_t;
+ bool started;
+};
+static DEFINE_PER_CPU(struct trans_state, freq_trans_state);
+
+static DEFINE_SPINLOCK(state_lock);
+
+static void probe_start(void *ignore, unsigned int start_freq,
+ unsigned int end_freq, unsigned int cpu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ per_cpu(freq_trans_state, cpu).start_freq = start_freq;
+ per_cpu(freq_trans_state, cpu).end_freq = end_freq;
+ per_cpu(freq_trans_state, cpu).start_t = ktime_get();
+ per_cpu(freq_trans_state, cpu).started = true;
+ spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void probe_end(void *ignore, unsigned int cpu)
+{
+ unsigned long flags;
+ struct trans *tr;
+ s64 dur_us;
+ ktime_t dur_t, end_t = ktime_get();
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ if (!per_cpu(freq_trans_state, cpu).started)
+ goto out;
+
+ dur_t = ktime_sub(end_t, per_cpu(freq_trans_state, cpu).start_t);
+ dur_us = ktime_to_us(dur_t);
+
+ tr = tr_search(&freq_trans_tree, cpu,
+ per_cpu(freq_trans_state, cpu).start_freq,
+ per_cpu(freq_trans_state, cpu).end_freq);
+ if (!tr) {
+ tr = kzalloc(sizeof(*tr), GFP_ATOMIC);
+ if (!tr) {
+ WARN_ONCE(1, "CPU frequency trace is now invalid!\n");
+ goto out;
+ }
+
+ tr->start_freq = per_cpu(freq_trans_state, cpu).start_freq;
+ tr->end_freq = per_cpu(freq_trans_state, cpu).end_freq;
+ tr->cpu = cpu;
+ tr->min_us = UINT_MAX;
+ tr_insert(&freq_trans_tree, tr);
+ }
+ tr->total_t = ktime_add(tr->total_t, dur_t);
+ tr->count++;
+
+ if (dur_us > tr->max_us)
+ tr->max_us = dur_us;
+ if (dur_us < tr->min_us)
+ tr->min_us = dur_us;
+
+ per_cpu(freq_trans_state, cpu).started = false;
+out:
+ spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void *freq_switch_stat_start(struct tracer_stat *trace)
+{
+ struct rb_node *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ n = rb_first(&freq_trans_tree);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return n;
+}
+
+static void *freq_switch_stat_next(void *prev, int idx)
+{
+ struct rb_node *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ n = rb_next(prev);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return n;
+}
+
+static int freq_switch_stat_show(struct seq_file *s, void *p)
+{
+ unsigned long flags;
+ struct trans *tr = p;
+
+ spin_lock_irqsave(&state_lock, flags);
+ seq_printf(s, "%3d %9d %8d %5d %6lld %6d %6d\n", tr->cpu,
+ tr->start_freq, tr->end_freq, tr->count,
+ div_s64(ktime_to_us(tr->total_t), tr->count),
+ tr->min_us, tr->max_us);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return 0;
+}
+
+static void freq_switch_stat_release(void *stat)
+{
+ struct trans *tr = stat;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ rb_erase(&tr->node, &freq_trans_tree);
+ spin_unlock_irqrestore(&state_lock, flags);
+ kfree(tr);
+}
+
+static int freq_switch_stat_headers(struct seq_file *s)
+{
+ seq_puts(s, "CPU START_KHZ END_KHZ COUNT AVG_US MIN_US MAX_US\n");
+ seq_puts(s, " | | | | | | |\n");
+ return 0;
+}
+
+struct tracer_stat freq_switch_stats __read_mostly = {
+ .name = "cpu_freq_switch",
+ .stat_start = freq_switch_stat_start,
+ .stat_next = freq_switch_stat_next,
+ .stat_show = freq_switch_stat_show,
+ .stat_release = freq_switch_stat_release,
+ .stat_headers = freq_switch_stat_headers
+};
+
+static void trace_freq_switch_disable(void)
+{
+ unregister_stat_tracer(&freq_switch_stats);
+ unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+ unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+ pr_info("disabled cpu frequency switch time profiling\n");
+}
+
+static int trace_freq_switch_enable(void)
+{
+ int ret;
+
+ ret = register_trace_cpu_frequency_switch_start(probe_start, NULL);
+ if (ret)
+ goto out;
+
+ ret = register_trace_cpu_frequency_switch_end(probe_end, NULL);
+ if (ret)
+ goto err_register_switch_end;
+
+ ret = register_stat_tracer(&freq_switch_stats);
+ if (ret)
+ goto err_register_stat_tracer;
+
+ pr_info("enabled cpu frequency switch time profiling\n");
+ return 0;
+
+err_register_stat_tracer:
+ unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+err_register_switch_end:
+ unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+out:
+ pr_err("failed to enable cpu frequency switch time profiling\n");
+
+ return ret;
+}
+
+static DEFINE_MUTEX(debugfs_lock);
+static bool trace_freq_switch_enabled;
+
+static int debug_toggle_tracing(void *data, u64 val)
+{
+ int ret = 0;
+
+ mutex_lock(&debugfs_lock);
+
+ if (val == 1 && !trace_freq_switch_enabled)
+ ret = trace_freq_switch_enable();
+ else if (val == 0 && trace_freq_switch_enabled)
+ trace_freq_switch_disable();
+ else if (val > 1)
+ ret = -EINVAL;
+
+ if (!ret)
+ trace_freq_switch_enabled = val;
+
+ mutex_unlock(&debugfs_lock);
+
+ return ret;
+}
+
+static int debug_tracing_state_get(void *data, u64 *val)
+{
+ mutex_lock(&debugfs_lock);
+ *val = trace_freq_switch_enabled;
+ mutex_unlock(&debugfs_lock);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debug_tracing_state_fops, debug_tracing_state_get,
+ debug_toggle_tracing, "%llu\n");
+
+static int __init trace_freq_switch_init(void)
+{
+ struct dentry *d_tracer = tracing_init_dentry();
+
+ if (IS_ERR(d_tracer))
+ return 0;
+
+ tracefs_create_file("cpu_freq_switch_profile_enabled",
+ S_IRUGO | S_IWUSR, d_tracer, NULL, &debug_tracing_state_fops);
+
+ return 0;
+}
+late_initcall(trace_freq_switch_init);
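
One subtlety in the stat callbacks above: freq_switch_stat_start() and freq_switch_stat_next() hand back struct rb_node pointers, yet freq_switch_stat_show() treats the same void pointer as a struct trans. That works only because node is the first member of struct trans, so the two addresses coincide. A container_of() spelling, shown here purely as a sketch, makes that layout assumption explicit:

	static int example_stat_show(struct seq_file *s, void *p)
	{
		struct trans *tr = container_of((struct rb_node *)p,
						struct trans, node);

		seq_printf(s, "cpu%u %u->%u kHz, %u switches\n",
			   tr->cpu, tr->start_freq, tr->end_freq, tr->count);
		return 0;
	}
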
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index cc9f7a9319be..731f6484b811 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -256,7 +256,8 @@ int perf_trace_add(struct perf_event *p_event, int flags)
void perf_trace_del(struct perf_event *p_event, int flags)
{
struct trace_event_call *tp_event = p_event->tp_event;
- hlist_del_rcu(&p_event->hlist_entry);
+ if (!hlist_unhashed(&p_event->hlist_entry))
+ hlist_del_rcu(&p_event->hlist_entry);
tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
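
The guard added above matters because hlist_del_rcu() assumes the node is actually on a list; for an event that was never hashed (or was already removed), n->pprev is NULL and the delete would dereference it. A self-contained sketch of the same check:

	#include <linux/rculist.h>

	static void example_safe_unhash(struct hlist_node *n)
	{
		/* hlist_unhashed() is true when n->pprev is NULL, i.e. the
		 * node was initialized but never added, or already deleted. */
		if (!hlist_unhashed(n))
			hlist_del_rcu(n);
	}
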
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index b89e00c748f1..48aee7ff6381 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -287,14 +287,15 @@ static void output_printk(struct trace_event_buffer *fbuffer)
spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer,
+ unsigned long len)
{
if (tracepoint_printk)
output_printk(fbuffer);
event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
- fbuffer->flags, fbuffer->pc);
+ fbuffer->flags, fbuffer->pc, len);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
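
This hunk does not show what the new len argument is ultimately for; presumably event_trigger_unlock_commit() in this tree uses it to size the CoreSight STM mirror of the committed record (the same stm_log() mechanism the trace.c hunks add), and callers with no sized payload pass 0, as the trace_syscalls.c and trace_uprobe.c hunks below do. A sketch of a converted caller, with hypothetical names:

	/* Illustrative only: pass the record size when there is a sized
	 * payload to mirror, or 0 when there is not. */
	static void example_event_commit(struct trace_event_buffer *fbuffer,
					 unsigned long entry_len)
	{
		trace_event_buffer_commit(fbuffer, entry_len);
	}
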
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 21b162c07e83..c00137ea939e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -13,6 +13,7 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
+#include <linux/sched/sysctl.h>
#include "trace.h"
@@ -39,6 +40,12 @@ static int save_flags;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+/*
+ * irqsoff stack tracing threshold in ns.
+ * default: 1ms
+ */
+unsigned int sysctl_irqsoff_tracing_threshold_ns = 1000000UL;
+
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
@@ -454,17 +461,52 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
#else /* !CONFIG_PROVE_LOCKING */
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+struct irqsoff_store {
+ u64 ts;
+ unsigned long caddr[4];
+};
+
+static DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff);
+#endif /* CONFIG_PREEMPTIRQ_EVENTS */
+
/*
* We are only interested in hardirq on/off events:
*/
static inline void tracer_hardirqs_on(void)
{
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+ struct irqsoff_store *is = &per_cpu(the_irqsoff,
+ raw_smp_processor_id());
+
+ if (!is->ts) {
+ is->ts = sched_clock();
+ is->caddr[0] = CALLER_ADDR0;
+ is->caddr[1] = CALLER_ADDR1;
+ is->caddr[2] = CALLER_ADDR2;
+ is->caddr[3] = CALLER_ADDR3;
+ }
+#endif /* CONFIG_PREEMPTIRQ_EVENTS */
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
static inline void tracer_hardirqs_off(void)
{
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+ struct irqsoff_store *is = &per_cpu(the_irqsoff,
+ raw_smp_processor_id());
+ u64 delta = 0;
+
+ if (is->ts) {
+ delta = sched_clock() - is->ts;
+ is->ts = 0;
+ }
+ if (delta > sysctl_irqsoff_tracing_threshold_ns)
+ trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
+ is->caddr[2], is->caddr[3]);
+#endif /* CONFIG_PREEMPTIRQ_EVENTS */
+
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
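
sysctl_irqsoff_tracing_threshold_ns is declared above, but this hunk does not show where it is registered; in the MSM trees that wiring typically lives in kernel/sysctl.c. A plausible table entry, offered only as an assumption about how the knob is exposed:

	#include <linux/sysctl.h>

	/* Assumed wiring, not part of this diff. */
	static struct ctl_table example_irqsoff_table[] = {
		{
			.procname	= "irqsoff_tracing_threshold_ns",
			.data		= &sysctl_irqsoff_tracing_threshold_ns,
			.maxlen		= sizeof(unsigned int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};
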
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index ad1d6164e946..e82cff5c842c 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -304,7 +304,7 @@ static int t_show(struct seq_file *m, void *v)
if (!*fmt)
return 0;
- seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
+ seq_printf(m, "0x%lx : \"", 0L);
/*
* Tabs and new lines need to be converted.
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 6403f45da9d5..927fd4ad5846 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -359,7 +359,8 @@ static bool report_latency(struct trace_array *tr, cycle_t delta)
}
static void
-probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
+probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu,
+ unsigned int load)
{
if (task != wakeup_task)
return;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 0655afbea83f..a01740a98afa 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -336,7 +336,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ irq_flags, pc, 0);
}
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -382,7 +382,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
entry->ret = syscall_get_return_value(current, regs);
event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ irq_flags, pc, 0);
}
static int reg_event_syscall_enter(struct trace_event_file *file,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 518e62a398d2..0839129fbbd8 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -830,7 +830,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
memcpy(data, ucb->buf, tu->tp.size + dsize);
- event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
+ event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0, 0);
}
/* uprobe handler */