[Skiboot] [PATCH 03/15] core/trace: Change mask/and to modulo for buffer offset

Jordan Niethe jniethe5 at gmail.com
Mon Mar 25 11:14:13 AEDT 2019


The current method of calculating buffer offsets is to use a mask and
bitwise 'and'. This limits the potential sizes of the buffer to powers
of two. It is desirable to have more flexibility in the size of the
buffer later on, when we will mmap the trace buffers. Use modulo to
calculate the buffer offset instead. The boot trace buffer had a different
size when it carried around its own mask. Now it is the same size as the
other buffers.
---
 core/test/run-trace.c  |  1 -
 core/trace.c           | 17 +++++++----------
 external/trace/trace.c |  4 ++--
 include/trace_types.h  |  2 --
 4 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/core/test/run-trace.c b/core/test/run-trace.c
index ef7ebfe12655..e53ab30ddf69 100644
--- a/core/test/run-trace.c
+++ b/core/test/run-trace.c
@@ -182,7 +182,6 @@ static void test_parallel(void)
 
 	for (i = 0; i < CPUS; i++) {
 		fake_cpus[i].trace = p + i * len;
-		fake_cpus[i].trace->tb.mask = cpu_to_be64(TBUF_SZ - 1);
 		fake_cpus[i].trace->tb.max_size = cpu_to_be32(sizeof(union trace));
 		fake_cpus[i].is_secondary = false;
 	}
diff --git a/core/trace.c b/core/trace.c
index d4e1b1d0d245..e1d5c32aede4 100644
--- a/core/trace.c
+++ b/core/trace.c
@@ -32,16 +32,14 @@
 #define MAX_SIZE (sizeof(union trace) + 7)
 
 /* Smaller trace buffer for early booting */
-#define BOOT_TBUF_SZ 65536
 static struct {
 	struct trace_info trace_info;
-	char buf[BOOT_TBUF_SZ + MAX_SIZE];
+	char buf[TBUF_SZ + MAX_SIZE];
 } boot_tracebuf;
 
 void init_boot_tracebuf(struct cpu_thread *boot_cpu)
 {
 	init_lock(&boot_tracebuf.trace_info.lock);
-	boot_tracebuf.trace_info.tb.mask = cpu_to_be64(BOOT_TBUF_SZ - 1);
 	boot_tracebuf.trace_info.tb.max_size = cpu_to_be32(MAX_SIZE);
 
 	boot_cpu->trace = &boot_tracebuf.trace_info;
@@ -61,7 +59,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
 	struct trace_repeat *rpt;
 	u32 len;
 
-	prev = (void *)tb->buf + be64_to_cpu(tb->last & tb->mask);
+	prev = (void *)tb->buf + be64_to_cpu(tb->last) % TBUF_SZ;
 
 	if (prev->type != trace->hdr.type
 	    || prev->len_div_8 != trace->hdr.len_div_8
@@ -80,7 +78,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
 	if (be64_to_cpu(tb->last) + len != be64_to_cpu(tb->end)) {
 		u64 pos = be64_to_cpu(tb->last) + len;
 		/* FIXME: Reader is not protected from seeing this! */
-		rpt = (void *)tb->buf + (pos & be64_to_cpu(tb->mask));
+		rpt = (void *)tb->buf + pos % TBUF_SZ;
 		assert(pos + rpt->len_div_8*8 == be64_to_cpu(tb->end));
 		assert(rpt->type == TRACE_REPEAT);
 
@@ -99,7 +97,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
 	 */
 	assert(trace->hdr.len_div_8 * 8 >= sizeof(*rpt));
 
-	rpt = (void *)tb->buf + be64_to_cpu(tb->end & tb->mask);
+	rpt = (void *)tb->buf + be64_to_cpu(tb->end) % TBUF_SZ;
 	rpt->timestamp = trace->hdr.timestamp;
 	rpt->type = TRACE_REPEAT;
 	rpt->len_div_8 = sizeof(*rpt) >> 3;
@@ -138,12 +136,12 @@ void trace_add(union trace *trace, u8 type, u16 len)
 	lock(&ti->lock);
 
 	/* Throw away old entries before we overwrite them. */
-	while ((be64_to_cpu(ti->tb.start) + be64_to_cpu(ti->tb.mask) + 1)
+	while ((be64_to_cpu(ti->tb.start) + TBUF_SZ)
 	       < (be64_to_cpu(ti->tb.end) + tsz)) {
 		struct trace_hdr *hdr;
 
 		hdr = (void *)ti->tb.buf +
-			be64_to_cpu(ti->tb.start & ti->tb.mask);
+			be64_to_cpu(ti->tb.start) % TBUF_SZ;
 		ti->tb.start = cpu_to_be64(be64_to_cpu(ti->tb.start) +
 					   (hdr->len_div_8 << 3));
 	}
@@ -154,7 +152,7 @@ void trace_add(union trace *trace, u8 type, u16 len)
 	/* Check for duplicates... */
 	if (!handle_repeat(&ti->tb, trace)) {
 		/* This may go off end, and that's why ti->tb.buf is oversize */
-		memcpy(ti->tb.buf + be64_to_cpu(ti->tb.end & ti->tb.mask),
+		memcpy(ti->tb.buf + be64_to_cpu(ti->tb.end) % TBUF_SZ,
 		       trace, tsz);
 		ti->tb.last = ti->tb.end;
 		lwsync(); /* write barrier: write entry before exposing */
@@ -220,7 +218,6 @@ void init_trace_buffers(void)
 			any = t->trace;
 			memset(t->trace, 0, size);
 			init_lock(&t->trace->lock);
-			t->trace->tb.mask = cpu_to_be64(TBUF_SZ - 1);
 			t->trace->tb.max_size = cpu_to_be32(MAX_SIZE);
 			trace_add_desc(any, sizeof(t->trace->tb) +
 				       tracebuf_extra());
diff --git a/external/trace/trace.c b/external/trace/trace.c
index 745da53cbd9c..c59cf5c2256f 100644
--- a/external/trace/trace.c
+++ b/external/trace/trace.c
@@ -32,7 +32,7 @@ bool trace_empty(const struct tracebuf *tb)
 	 * we've already seen every repeat for (yet which may be
 	 * incremented in future), we're also empty.
 	 */
-	rep = (void *)tb->buf + be64_to_cpu(tb->rpos & tb->mask);
+	rep = (void *)tb->buf + be64_to_cpu(tb->rpos) % TBUF_SZ;
 	if (be64_to_cpu(tb->end) != be64_to_cpu(tb->rpos) + sizeof(*rep))
 		return false;
 
@@ -62,7 +62,7 @@ again:
 	 * The actual buffer is slightly larger than tbsize, so this
 	 * memcpy is always valid.
 	 */
-	memcpy(t, tb->buf + be64_to_cpu(tb->rpos & tb->mask), len);
+	memcpy(t, tb->buf + be64_to_cpu(tb->rpos) % TBUF_SZ, len);
 
 	rmb(); /* read barrier, so we read tb->start after copying record. */
 
diff --git a/include/trace_types.h b/include/trace_types.h
index 83c49a263c92..d14c78d2eed6 100644
--- a/include/trace_types.h
+++ b/include/trace_types.h
@@ -28,8 +28,6 @@
 
 /* One per cpu, plus one for NMIs */
 struct tracebuf {
-	/* Mask to apply to get buffer offset. */
-	__be64 mask;
 	/* This where the buffer starts. */
 	__be64 start;
 	/* This is where writer has written to. */
-- 
2.20.1



More information about the Skiboot mailing list