drivers/mmc/core/ring_buffer.c
/*
 * Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
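
/*
 * Lightweight per-host ring buffer for MMC debug events.  The buffer
 * holds MMC_TRACE_RBUF_NUM_EVENTS fixed-size slots of
 * MMC_TRACE_EVENT_SZ bytes each (see <linux/mmc/ring_buffer.h>); the
 * event count must be a power of two so the write index can wrap with
 * a simple mask.
 */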

#include <linux/mmc/ring_buffer.h>
#include <linux/mmc/host.h>

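/**
 * mmc_stop_tracing - stop recording events into the trace ring buffer
 * @mmc: host whose tracing should be stopped
 *
 * Subsequent mmc_trace_write() calls become no-ops; already-recorded
 * events remain available to mmc_dump_trace_buffer().
 */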
void mmc_stop_tracing(struct mmc_host *mmc)
{
	mmc->trace_buf.stop_tracing = true;
}

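/**
 * mmc_trace_write - log a formatted event into the host's trace ring buffer
 * @mmc: host to log the event against
 * @fmt: printf-style format string for the event
 *
 * Each event is stamped with the CPU number, a nanosecond timestamp
 * and the host name, then written into the next MMC_TRACE_EVENT_SZ
 * byte slot.  Only the write-index update is serialized by trace_lock;
 * the copy into the slot itself runs outside the lock.
 *
 * A hypothetical caller (not part of this file) might look like:
 *
 *	mmc_trace_write(host, "CMD%d arg 0x%x\n", opcode, arg);
 */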
void mmc_trace_write(struct mmc_host *mmc,
			const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	char *event;
	unsigned long flags;
	char str[MMC_TRACE_EVENT_SZ];

	if (unlikely(!mmc->trace_buf.data) ||
			unlikely(mmc->trace_buf.stop_tracing))
		return;

	/*
	 * An increment followed by a bitwise AND with
	 * (MMC_TRACE_RBUF_NUM_EVENTS - 1) keeps the index within array
	 * bounds; since the event count is a power of two, the AND is
	 * equivalent to a modulus.  The cast to unsigned is necessary
	 * so the increment wraps to 0 correctly on rollover.
	 */
	spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
	mmc->trace_buf.wr_idx += 1;
	idx = ((unsigned int)mmc->trace_buf.wr_idx) &
			(MMC_TRACE_RBUF_NUM_EVENTS - 1);
	spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);

	/* Catch an unlikely machine-specific wrap-around bug */
	if (unlikely(idx > (MMC_TRACE_RBUF_NUM_EVENTS - 1))) {
		pr_err("%s: %s: Invalid idx:%d for mmc trace, tracing stopped!\n",
			mmc_hostname(mmc), __func__, idx);
		mmc_stop_tracing(mmc);
		return;
	}

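	/*
	 * Fold the CPU number, timestamp and host name into the format
	 * string first, then expand the caller's varargs straight into
	 * the chosen ring slot.
	 */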
	event = &mmc->trace_buf.data[idx * MMC_TRACE_EVENT_SZ];
	va_start(args, fmt);
	snprintf(str, MMC_TRACE_EVENT_SZ, "<%d> %lld: %s: %s",
		raw_smp_processor_id(),
		ktime_to_ns(ktime_get()),
		mmc_hostname(mmc), fmt);
	memset(event, '\0', MMC_TRACE_EVENT_SZ);
	vscnprintf(event, MMC_TRACE_EVENT_SZ, str, args);
	va_end(args);
}

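/**
 * mmc_trace_init - allocate and initialize a host's trace ring buffer
 * @mmc: host to set up tracing for
 *
 * Allocates MMC_TRACE_RBUF_SZ_ORDER pages of zeroed memory for the
 * event slots and primes the write index at -1 so the first event
 * lands in slot 0.  On allocation failure tracing is simply left
 * disabled (trace_buf.data stays NULL).
 */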
void mmc_trace_init(struct mmc_host *mmc)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(MMC_TRACE_RBUF_NUM_EVENTS);

	mmc->trace_buf.data = (char *)
				__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				MMC_TRACE_RBUF_SZ_ORDER);

	if (!mmc->trace_buf.data) {
		pr_err("%s: %s: Unable to allocate trace buffer\n",
			mmc_hostname(mmc), __func__);
		return;
	}

	spin_lock_init(&mmc->trace_buf.trace_lock);
	mmc->trace_buf.wr_idx = -1;
}

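/**
 * mmc_trace_free - release a host's trace ring buffer, if allocated
 * @mmc: host whose trace buffer should be freed
 */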
void mmc_trace_free(struct mmc_host *mmc)
{
	if (mmc->trace_buf.data)
		free_pages((unsigned long)mmc->trace_buf.data,
			MMC_TRACE_RBUF_SZ_ORDER);
}

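/**
 * mmc_dump_trace_buffer - print every recorded event, oldest first
 * @mmc: host whose trace buffer should be dumped
 * @s: optional seq_file to print into; when NULL, events go to pr_err()
 */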
void mmc_dump_trace_buffer(struct mmc_host *mmc, struct seq_file *s)
{
	unsigned int idx, cur_idx;
	unsigned int mask = MMC_TRACE_RBUF_NUM_EVENTS - 1;
	char *event;
	unsigned long flags;

	if (!mmc->trace_buf.data)
		return;

	spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
	idx = ((unsigned int)mmc->trace_buf.wr_idx) & mask;
	cur_idx = (idx + 1) & mask;

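	/*
	 * Start one slot past the current write index (the oldest
	 * event) and walk the ring; the last slot visited before
	 * wrapping back is the most recent event.
	 */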
	do {
		event = &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
		if (s)
			seq_printf(s, "%s", event);
		else
			pr_err("%s", event);
		cur_idx = (cur_idx + 1) & mask;
		if (cur_idx == idx) {
			event =
			  &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
			if (s)
				seq_printf(s, "latest_event: %s", event);
			else
				pr_err("latest_event: %s", event);
			break;
		}
	} while (1);
	spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);
}