[Skiboot] [PATCH 12/15] external/trace: Use trace_reader struct

Jordan Niethe jniethe5 at gmail.com
Mon Mar 25 11:14:22 AEDT 2019


Currently the trace_get and trace_empty functions operate on a tracebuf
struct, which requires write access to the tracebuf. It would be
convenient for dump_trace to use these functions as well, since that
would reduce code duplication and the functions are already unit
tested. To prepare for that, change them to operate on a trace_reader
instead and update the unit tests accordingly.
---
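For reference, the reader-side state these functions now take (added
earlier in this series as struct trace_reader in external/trace/trace.h)
and a typical consumer loop look roughly like the sketch below. The
field types are inferred from the be64/be32 conversions in the diff,
and drain_one_buffer() is a hypothetical helper, not part of this patch:

/* Per-reader cursor into a shared tracebuf (sketch; types inferred
 * from the endian conversions used in external/trace/trace.c). */
struct trace_reader {
	__be64 rpos;		/* offset of the next record to consume */
	__be32 last_repeat;	/* repeats already reported to this reader */
	struct tracebuf *tb;	/* the shared, writer-owned buffer */
};

/* Hypothetical consumer: drain every pending entry from one buffer.
 * trace_get() only writes to the trace_reader, so the tracebuf itself
 * never needs to be modified by the reader (e.g. dump_trace). */
static void drain_one_buffer(struct trace_reader *tr)
{
	union trace t;

	while (trace_get(&t, tr)) {
		/* t.hdr.type, t.hdr.timestamp etc. are now valid. */
	}
}
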
 core/test/run-trace.c  | 46 +++++++++++++++++++++------------------
 external/trace/trace.c | 49 ++++++++++++++++++++++++------------------
 external/trace/trace.h |  4 ++--
 3 files changed, 55 insertions(+), 44 deletions(-)

diff --git a/core/test/run-trace.c b/core/test/run-trace.c
index 6cb9a582078d..19bd2e67dbdd 100644
--- a/core/test/run-trace.c
+++ b/core/test/run-trace.c
@@ -102,9 +102,9 @@ extern struct dt_node *opal_node;
 
 #include "../trace.c"
 
-#define rmb() lwsync()
-
 #include "../external/trace/trace.c"
+static struct trace_reader trace_readers[CPUS];
+struct trace_reader *my_trace_reader;
 #include "../device.c"
 
 char __rodata_start[1], __rodata_end[1];
@@ -184,6 +184,8 @@ static void test_parallel(void)
 		fake_cpus[i].trace = p + i * len;
 		fake_cpus[i].trace->tb.max_size = cpu_to_be32(sizeof(union trace));
 		fake_cpus[i].is_secondary = false;
+		memset(&trace_readers[i], 0, sizeof(struct trace_reader));
+		trace_readers[i].tb = &fake_cpus[i].trace->tb;
 	}
 
 	for (i = 0; i < CPUS; i++) {
@@ -198,7 +200,7 @@ static void test_parallel(void)
 		union trace t;
 
 		for (i = 0; i < CPUS; i++) {
-			if (trace_get(&t, &fake_cpus[(i+last) % CPUS].trace->tb))
+			if (trace_get(&t, &trace_readers[(i+last) % CPUS]))
 				break;
 		}
 
@@ -276,17 +278,19 @@ int main(void)
 		fake_cpus[i].primary = &fake_cpus[i & ~0x1];
 	}
 	my_fake_cpu = &fake_cpus[0];
+	my_trace_reader = &trace_readers[0];
 	init_trace_buffers();
 
 	for (i = 0; i < CPUS; i++) {
-		assert(trace_empty(&fake_cpus[i].trace->tb));
-		assert(!trace_get(&trace, &fake_cpus[i].trace->tb));
+		trace_readers[i].tb = &fake_cpus[i].trace->tb;
+		assert(trace_empty(&trace_readers[i]));
+		assert(!trace_get(&trace, &trace_readers[i]));
 	}
 
 	assert(sizeof(trace.hdr) % 8 == 0);
 	timestamp = 1;
 	trace_add(&minimal, 100, sizeof(trace.hdr));
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
 	assert(be64_to_cpu(trace.hdr.timestamp) == timestamp);
 
@@ -296,19 +300,19 @@ int main(void)
 		trace_add(&minimal, 99 + (i%2), sizeof(trace.hdr));
 	}
 
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	/* First one must be overflow marker. */
 	assert(trace.hdr.type == TRACE_OVERFLOW);
 	assert(trace.hdr.len_div_8 * 8 == sizeof(trace.overflow));
 	assert(be64_to_cpu(trace.overflow.bytes_missed) == minimal.hdr.len_div_8 * 8);
 
 	for (i = 0; i < TBUF_SZ / (minimal.hdr.len_div_8 * 8); i++) {
-		assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+		assert(trace_get(&trace, my_trace_reader));
 		assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
 		assert(be64_to_cpu(trace.hdr.timestamp) == i+1);
 		assert(trace.hdr.type == 99 + ((i+1)%2));
 	}
-	assert(!trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(!trace_get(&trace, my_trace_reader));
 
 	/* Now put in some weird-length ones, to test overlap.
 	 * Last power of 2, minus 8. */
@@ -317,12 +321,12 @@ int main(void)
 		timestamp = i;
 		trace_add(&large, 100 + (i%2), (1 << (j-1)));
 	}
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(trace.hdr.type == TRACE_OVERFLOW);
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(trace.hdr.len_div_8 == large.hdr.len_div_8);
 	i = be64_to_cpu(trace.hdr.timestamp);
-	while (trace_get(&trace, &my_fake_cpu->trace->tb))
+	while (trace_get(&trace, my_trace_reader))
 		assert(be64_to_cpu(trace.hdr.timestamp) == ++i);
 
 	/* Test repeats. */
@@ -335,30 +339,30 @@ int main(void)
 	timestamp = i+1;
 	trace_add(&minimal, 101, sizeof(trace.hdr));
 
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(trace.hdr.timestamp == 0);
 	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
 	assert(trace.hdr.type == 100);
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(trace.hdr.type == TRACE_REPEAT);
 	assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
 	assert(be16_to_cpu(trace.repeat.num) == 65535);
 	assert(be64_to_cpu(trace.repeat.timestamp) == 65535);
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(be64_to_cpu(trace.hdr.timestamp) == 65536);
 	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
 	assert(trace.hdr.type == 100);
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(trace.hdr.type == TRACE_REPEAT);
 	assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
 	assert(be16_to_cpu(trace.repeat.num) == 1);
 	assert(be64_to_cpu(trace.repeat.timestamp) == 65537);
 
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(be64_to_cpu(trace.hdr.timestamp) == 65538);
 	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
 	assert(trace.hdr.type == 101);
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(trace.hdr.type == TRACE_REPEAT);
 	assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
 	assert(be16_to_cpu(trace.repeat.num) == 1);
@@ -367,7 +371,7 @@ int main(void)
 	/* Now, test adding repeat while we're reading... */
 	timestamp = 0;
 	trace_add(&minimal, 100, sizeof(trace.hdr));
-	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+	assert(trace_get(&trace, my_trace_reader));
 	assert(be64_to_cpu(trace.hdr.timestamp) == 0);
 	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
 	assert(trace.hdr.type == 100);
@@ -375,7 +379,7 @@ int main(void)
 	for (i = 1; i < TBUF_SZ; i++) {
 		timestamp = i;
 		trace_add(&minimal, 100, sizeof(trace.hdr));
-		assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+		assert(trace_get(&trace, my_trace_reader));
 		if (i % 65536 == 0) {
 			assert(trace.hdr.type == 100);
 			assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
@@ -385,7 +389,7 @@ int main(void)
 			assert(be16_to_cpu(trace.repeat.num) == 1);
 		}
 		assert(be64_to_cpu(trace.repeat.timestamp) == i);
-		assert(!trace_get(&trace, &my_fake_cpu->trace->tb));
+		assert(!trace_get(&trace, my_trace_reader));
 	}
 
 	for (i = 0; i < CPUS; i++)
diff --git a/external/trace/trace.c b/external/trace/trace.c
index c59cf5c2256f..00d2b8782177 100644
--- a/external/trace/trace.c
+++ b/external/trace/trace.c
@@ -17,14 +17,21 @@
 #include <external/trace/trace.h>
 #include "../ccan/endian/endian.h"
 #include "../ccan/short_types/short_types.h"
+#include "trace.h"
 #include <trace_types.h>
 #include <errno.h>
 
-bool trace_empty(const struct tracebuf *tb)
+#if defined(__powerpc__) || defined(__powerpc64__)
+#define rmb() lwsync()
+#else
+#define rmb()
+#endif
+
+bool trace_empty(const struct trace_reader *tr)
 {
 	const struct trace_repeat *rep;
 
-	if (tb->rpos == tb->end)
+	if (tr->rpos == tr->tb->end)
 		return true;
 
 	/*
@@ -32,29 +39,29 @@ bool trace_empty(const struct tracebuf *tb)
 	 * we've already seen every repeat for (yet which may be
 	 * incremented in future), we're also empty.
 	 */
-	rep = (void *)tb->buf + be64_to_cpu(tb->rpos) % TBUF_SZ;
-	if (be64_to_cpu(tb->end) != be64_to_cpu(tb->rpos) + sizeof(*rep))
+	rep = (void *)tr->tb->buf + be64_to_cpu(tr->rpos) % TBUF_SZ;
+	if (be64_to_cpu(tr->tb->end) != be64_to_cpu(tr->rpos) + sizeof(*rep))
 		return false;
 
 	if (rep->type != TRACE_REPEAT)
 		return false;
 
-	if (be16_to_cpu(rep->num) != be32_to_cpu(tb->last_repeat))
+	if (be16_to_cpu(rep->num) != be32_to_cpu(tr->last_repeat))
 		return false;
 
 	return true;
 }
 
 /* You can't read in parallel, so some locking required in caller. */
-bool trace_get(union trace *t, struct tracebuf *tb)
+bool trace_get(union trace *t, struct trace_reader *tr)
 {
 	u64 start, rpos;
 	size_t len;
 
-	len = sizeof(*t) < be32_to_cpu(tb->max_size) ? sizeof(*t) :
-		be32_to_cpu(tb->max_size);
+	len = sizeof(*t) < be32_to_cpu(tr->tb->max_size) ? sizeof(*t) :
+		be32_to_cpu(tr->tb->max_size);
 
-	if (trace_empty(tb))
+	if (trace_empty(tr))
 		return false;
 
 again:
@@ -62,12 +69,12 @@ again:
 	 * The actual buffer is slightly larger than tbsize, so this
 	 * memcpy is always valid.
 	 */
-	memcpy(t, tb->buf + be64_to_cpu(tb->rpos) % TBUF_SZ, len);
+	memcpy(t, tr->tb->buf + be64_to_cpu(tr->rpos) % TBUF_SZ, len);
 
-	rmb(); /* read barrier, so we read tb->start after copying record. */
+	rmb(); /* read barrier, so we read tr->tb->start after copying record. */
 
-	start = be64_to_cpu(tb->start);
-	rpos = be64_to_cpu(tb->rpos);
+	start = be64_to_cpu(tr->tb->start);
+	rpos = be64_to_cpu(tr->rpos);
 
 	/* Now, was that overwritten? */
 	if (rpos < start) {
@@ -76,7 +83,7 @@ again:
 		t->overflow.type = TRACE_OVERFLOW;
 		t->overflow.len_div_8 = sizeof(t->overflow) / 8;
 		t->overflow.bytes_missed = cpu_to_be64(start - rpos);
-		tb->rpos = cpu_to_be64(start);
+		tr->rpos = cpu_to_be64(start);
 		return true;
 	}
 
@@ -85,10 +92,10 @@ again:
 		u32 num = be16_to_cpu(t->repeat.num);
 
 		/* In case we've read some already... */
-		t->repeat.num = cpu_to_be16(num - be32_to_cpu(tb->last_repeat));
+		t->repeat.num = cpu_to_be16(num - be32_to_cpu(tr->last_repeat));
 
 		/* Record how many repeats we saw this time. */
-		tb->last_repeat = cpu_to_be32(num);
+		tr->last_repeat = cpu_to_be32(num);
 
 		/* Don't report an empty repeat buffer. */
 		if (t->repeat.num == 0) {
@@ -96,16 +103,16 @@ again:
 			 * This can't be the last buffer, otherwise
 			 * trace_empty would have returned true.
 			 */
-			assert(be64_to_cpu(tb->end) >
+			assert(be64_to_cpu(tr->tb->end) >
 			       rpos + t->hdr.len_div_8 * 8);
 			/* Skip to next entry. */
-			tb->rpos = cpu_to_be64(rpos + t->hdr.len_div_8 * 8);
-			tb->last_repeat = 0;
+			tr->rpos = cpu_to_be64(rpos + t->hdr.len_div_8 * 8);
+			tr->last_repeat = 0;
 			goto again;
 		}
 	} else {
-		tb->last_repeat = 0;
-		tb->rpos = cpu_to_be64(rpos + t->hdr.len_div_8 * 8);
+		tr->last_repeat = 0;
+		tr->rpos = cpu_to_be64(rpos + t->hdr.len_div_8 * 8);
 	}
 
 	return true;
diff --git a/external/trace/trace.h b/external/trace/trace.h
index ef9b2a073973..2c453ea3d969 100644
--- a/external/trace/trace.h
+++ b/external/trace/trace.h
@@ -30,9 +30,9 @@ struct trace_reader {
 };
 
 /* Is this tracebuf empty? */
-bool trace_empty(const struct tracebuf *tracebuf);
+bool trace_empty(const struct trace_reader *tr);
 
 /* Get the next trace from this buffer (false if empty). */
-bool trace_get(union trace *t, struct tracebuf *tb);
+bool trace_get(union trace *t, struct trace_reader *tr);
 
 #endif
-- 
2.20.1


