[Skiboot] [PATCH 03/15] core/trace: Change mask/and to modulo for buffer offset

Oliver oohall at gmail.com
Mon Mar 25 11:48:30 AEDT 2019


On Mon, Mar 25, 2019 at 11:17 AM Jordan Niethe <jniethe5 at gmail.com> wrote:
>
> The current method of calculating buffer offsets is to use a mask and
> bitwise 'and'. This limits the potential sizes of the buffer to powers
> of two. It is desirable to have more flexibility in the size of the
> buffer for later on when we will mmap the trace buffers.

Can you explain *why* we need to support non-power-of-two sized
buffers? I know that it's because the header and buffer need to be 64K
aligned so we can map them, so the buffer itself can't be 64K, but
that needs to be spelled out explicitly in the commit message.

> Use modulo to calculate buffer offset instead.

> The boot trace buffer had a different
> size when it carried around its own mask. Now it is the same size as the
> other buffers.

This seems like an unrelated change.

> ---
>  core/test/run-trace.c  |  1 -
>  core/trace.c           | 17 +++++++----------
>  external/trace/trace.c |  4 ++--
>  include/trace_types.h  |  2 --
>  4 files changed, 9 insertions(+), 15 deletions(-)
>
> diff --git a/core/test/run-trace.c b/core/test/run-trace.c
> index ef7ebfe12655..e53ab30ddf69 100644
> --- a/core/test/run-trace.c
> +++ b/core/test/run-trace.c
> @@ -182,7 +182,6 @@ static void test_parallel(void)
>
>         for (i = 0; i < CPUS; i++) {
>                 fake_cpus[i].trace = p + i * len;
> -               fake_cpus[i].trace->tb.mask = cpu_to_be64(TBUF_SZ - 1);
>                 fake_cpus[i].trace->tb.max_size = cpu_to_be32(sizeof(union trace));
>                 fake_cpus[i].is_secondary = false;
>         }
> diff --git a/core/trace.c b/core/trace.c
> index d4e1b1d0d245..e1d5c32aede4 100644
> --- a/core/trace.c
> +++ b/core/trace.c
> @@ -32,16 +32,14 @@
>  #define MAX_SIZE (sizeof(union trace) + 7)
>
>  /* Smaller trace buffer for early booting */
> -#define BOOT_TBUF_SZ 65536
>  static struct {
>         struct trace_info trace_info;
> -       char buf[BOOT_TBUF_SZ + MAX_SIZE];
> +       char buf[TBUF_SZ + MAX_SIZE];
>  } boot_tracebuf;

The boot trace buffer is statically allocated in the linker script since
we need it available before we've even discovered how much memory is
in the system. Once we've initialised the per-core trace buffers this
becomes dead space, so expanding it to 1MB isn't ideal, especially since
we don't expect to put much stuff in it to begin with.

The trace buffer header contains size information to allow us to have
variably sized buffers, so I'm not sure why this is necessary.

>  void init_boot_tracebuf(struct cpu_thread *boot_cpu)
>  {
>         init_lock(&boot_tracebuf.trace_info.lock);
> -       boot_tracebuf.trace_info.tb.mask = cpu_to_be64(BOOT_TBUF_SZ - 1);
>         boot_tracebuf.trace_info.tb.max_size = cpu_to_be32(MAX_SIZE);
>
>         boot_cpu->trace = &boot_tracebuf.trace_info;
> @@ -61,7 +59,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
>         struct trace_repeat *rpt;
>         u32 len;
>
> -       prev = (void *)tb->buf + be64_to_cpu(tb->last & tb->mask);
> +       prev = (void *)tb->buf + be64_to_cpu(tb->last) % TBUF_SZ;
>
>         if (prev->type != trace->hdr.type
>             || prev->len_div_8 != trace->hdr.len_div_8
> @@ -80,7 +78,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
>         if (be64_to_cpu(tb->last) + len != be64_to_cpu(tb->end)) {
>                 u64 pos = be64_to_cpu(tb->last) + len;
>                 /* FIXME: Reader is not protected from seeing this! */
> -               rpt = (void *)tb->buf + (pos & be64_to_cpu(tb->mask));
> +               rpt = (void *)tb->buf + pos % TBUF_SZ;
>                 assert(pos + rpt->len_div_8*8 == be64_to_cpu(tb->end));
>                 assert(rpt->type == TRACE_REPEAT);
>
> @@ -99,7 +97,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
>          */
>         assert(trace->hdr.len_div_8 * 8 >= sizeof(*rpt));
>
> -       rpt = (void *)tb->buf + be64_to_cpu(tb->end & tb->mask);
> +       rpt = (void *)tb->buf + be64_to_cpu(tb->end) % TBUF_SZ;
>         rpt->timestamp = trace->hdr.timestamp;
>         rpt->type = TRACE_REPEAT;
>         rpt->len_div_8 = sizeof(*rpt) >> 3;
> @@ -138,12 +136,12 @@ void trace_add(union trace *trace, u8 type, u16 len)
>         lock(&ti->lock);
>
>         /* Throw away old entries before we overwrite them. */
> -       while ((be64_to_cpu(ti->tb.start) + be64_to_cpu(ti->tb.mask) + 1)
> +       while ((be64_to_cpu(ti->tb.start) + TBUF_SZ)
>                < (be64_to_cpu(ti->tb.end) + tsz)) {
>                 struct trace_hdr *hdr;
>
>                 hdr = (void *)ti->tb.buf +
> -                       be64_to_cpu(ti->tb.start & ti->tb.mask);
> +                       be64_to_cpu(ti->tb.start) % TBUF_SZ;
>                 ti->tb.start = cpu_to_be64(be64_to_cpu(ti->tb.start) +
>                                            (hdr->len_div_8 << 3));
>         }
> @@ -154,7 +152,7 @@ void trace_add(union trace *trace, u8 type, u16 len)
>         /* Check for duplicates... */
>         if (!handle_repeat(&ti->tb, trace)) {
>                 /* This may go off end, and that's why ti->tb.buf is oversize */
> -               memcpy(ti->tb.buf + be64_to_cpu(ti->tb.end & ti->tb.mask),
> +               memcpy(ti->tb.buf + be64_to_cpu(ti->tb.end) % TBUF_SZ,
>                        trace, tsz);
>                 ti->tb.last = ti->tb.end;
>                 lwsync(); /* write barrier: write entry before exposing */
> @@ -220,7 +218,6 @@ void init_trace_buffers(void)
>                         any = t->trace;
>                         memset(t->trace, 0, size);
>                         init_lock(&t->trace->lock);
> -                       t->trace->tb.mask = cpu_to_be64(TBUF_SZ - 1);
>                         t->trace->tb.max_size = cpu_to_be32(MAX_SIZE);
>                         trace_add_desc(any, sizeof(t->trace->tb) +
>                                        tracebuf_extra());
> diff --git a/external/trace/trace.c b/external/trace/trace.c
> index 745da53cbd9c..c59cf5c2256f 100644
> --- a/external/trace/trace.c
> +++ b/external/trace/trace.c
> @@ -32,7 +32,7 @@ bool trace_empty(const struct tracebuf *tb)
>          * we've already seen every repeat for (yet which may be
>          * incremented in future), we're also empty.
>          */
> -       rep = (void *)tb->buf + be64_to_cpu(tb->rpos & tb->mask);
> +       rep = (void *)tb->buf + be64_to_cpu(tb->rpos) % TBUF_SZ;
>         if (be64_to_cpu(tb->end) != be64_to_cpu(tb->rpos) + sizeof(*rep))
>                 return false;
>
> @@ -62,7 +62,7 @@ again:
>          * The actual buffer is slightly larger than tbsize, so this
>          * memcpy is always valid.
>          */
> -       memcpy(t, tb->buf + be64_to_cpu(tb->rpos & tb->mask), len);
> +       memcpy(t, tb->buf + be64_to_cpu(tb->rpos) % TBUF_SZ, len);
>
>         rmb(); /* read barrier, so we read tb->start after copying record. */
>
> diff --git a/include/trace_types.h b/include/trace_types.h
> index 83c49a263c92..d14c78d2eed6 100644
> --- a/include/trace_types.h
> +++ b/include/trace_types.h
> @@ -28,8 +28,6 @@
>
>  /* One per cpu, plus one for NMIs */
>  struct tracebuf {
> -       /* Mask to apply to get buffer offset. */
> -       __be64 mask;
>         /* This where the buffer starts. */
>         __be64 start;
>         /* This is where writer has written to. */
> --
> 2.20.1
>
> _______________________________________________
> Skiboot mailing list
> Skiboot at lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/skiboot


More information about the Skiboot mailing list