[PATCH] powerpc/sstep: Fix array out of bound warning
Ravi Bangoria
ravi.bangoria at linux.ibm.com
Fri Jan 15 17:16:20 AEDT 2021
Compiling the kernel with -Warray-bounds throws the below warning:
In function 'emulate_vsx_store':
warning: array subscript is above array bounds [-Warray-bounds]
buf.d[2] = byterev_8(reg->d[1]);
~~~~~^~~
buf.d[3] = byterev_8(reg->d[0]);
~~~~~^~~
Fix it by converting the local variable 'union vsx_reg buf' into an array.
Also treat the function argument 'union vsx_reg *reg' as an array rather
than a pointer to a single element, because the callers actually pass an
array to it.
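To illustrate the pattern outside the kernel, here is a minimal standalone
sketch (the union is trimmed to the relevant members of 'union vsx_reg'
from arch/powerpc/include/asm/sstep.h, and the helper names are
hypothetical, not kernel code):

#include <string.h>

union vsx_reg {
	unsigned char b[16];
	unsigned long d[2];	/* one 16-byte VSX register: d[0] and d[1] only */
};

/* Before: the 32-byte vector paired path writes four doublewords into a
 * single union, so buf.d[2]/buf.d[3] overrun the d[2] member and gcc
 * flags them under -Warray-bounds. */
void store_pair_before(void *mem)
{
	union vsx_reg buf;

	buf.d[0] = 0;
	buf.d[1] = 1;
	buf.d[2] = 2;	/* warning: array subscript is above array bounds */
	buf.d[3] = 3;	/* warning: array subscript is above array bounds */
	memcpy(mem, &buf, 32);	/* also reads 16 bytes past the object */
}

/* After: two unions back to back provide the same 32 bytes with every
 * index in bounds; buf[1].d[0] is the byte offset buf.d[2] aimed at. */
void store_pair_after(void *mem)
{
	union vsx_reg buf[2];

	buf[0].d[0] = 0;
	buf[0].d[1] = 1;
	buf[1].d[0] = 2;
	buf[1].d[1] = 3;
	memcpy(mem, buf, 32);
}
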
Fixes: af99da74333b ("powerpc/sstep: Support VSX vector paired storage access instructions")
Signed-off-by: Ravi Bangoria <ravi.bangoria at linux.ibm.com>
---
arch/powerpc/lib/sstep.c | 61 ++++++++++++++++++++--------------------
1 file changed, 31 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index bf7a7d62ae8b..5b4281ade5b6 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -723,7 +723,8 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
const unsigned char *bp;
size = GETSIZE(op->type);
- reg->d[0] = reg->d[1] = 0;
+ reg[0].d[0] = reg[0].d[1] = 0;
+ reg[1].d[0] = reg[1].d[1] = 0;
switch (op->element_size) {
case 32:
@@ -742,25 +743,25 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
/* scalar loads, lxvd2x, lxvdsx */
read_size = (size >= 8) ? 8 : size;
i = IS_LE ? 8 : 8 - read_size;
- memcpy(&reg->b[i], mem, read_size);
+ memcpy(&reg[0].b[i], mem, read_size);
if (rev)
- do_byte_reverse(&reg->b[i], 8);
+ do_byte_reverse(&reg[0].b[i], 8);
if (size < 8) {
if (op->type & SIGNEXT) {
/* size == 4 is the only case here */
- reg->d[IS_LE] = (signed int) reg->d[IS_LE];
+ reg[0].d[IS_LE] = (signed int)reg[0].d[IS_LE];
} else if (op->vsx_flags & VSX_FPCONV) {
preempt_disable();
- conv_sp_to_dp(&reg->fp[1 + IS_LE],
- &reg->dp[IS_LE]);
+ conv_sp_to_dp(&reg[0].fp[1 + IS_LE],
+ &reg[0].dp[IS_LE]);
preempt_enable();
}
} else {
if (size == 16) {
unsigned long v = *(unsigned long *)(mem + 8);
- reg->d[IS_BE] = !rev ? v : byterev_8(v);
+ reg[0].d[IS_BE] = !rev ? v : byterev_8(v);
} else if (op->vsx_flags & VSX_SPLAT)
- reg->d[IS_BE] = reg->d[IS_LE];
+ reg[0].d[IS_BE] = reg[0].d[IS_LE];
}
break;
case 4:
@@ -768,13 +769,13 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
wp = mem;
for (j = 0; j < size / 4; ++j) {
i = IS_LE ? 3 - j : j;
- reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
+ reg[0].w[i] = !rev ? *wp++ : byterev_4(*wp++);
}
if (op->vsx_flags & VSX_SPLAT) {
- u32 val = reg->w[IS_LE ? 3 : 0];
+ u32 val = reg[0].w[IS_LE ? 3 : 0];
for (; j < 4; ++j) {
i = IS_LE ? 3 - j : j;
- reg->w[i] = val;
+ reg[0].w[i] = val;
}
}
break;
@@ -783,7 +784,7 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
hp = mem;
for (j = 0; j < size / 2; ++j) {
i = IS_LE ? 7 - j : j;
- reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
+ reg[0].h[i] = !rev ? *hp++ : byterev_2(*hp++);
}
break;
case 1:
@@ -791,7 +792,7 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
bp = mem;
for (j = 0; j < size; ++j) {
i = IS_LE ? 15 - j : j;
- reg->b[i] = *bp++;
+ reg[0].b[i] = *bp++;
}
break;
}
@@ -804,7 +805,7 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
{
int size, write_size;
int i, j;
- union vsx_reg buf;
+ union vsx_reg buf[2];
unsigned int *wp;
unsigned short *hp;
unsigned char *bp;
@@ -818,11 +819,11 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
break;
if (rev) {
/* reverse 32 bytes */
- buf.d[0] = byterev_8(reg->d[3]);
- buf.d[1] = byterev_8(reg->d[2]);
- buf.d[2] = byterev_8(reg->d[1]);
- buf.d[3] = byterev_8(reg->d[0]);
- reg = &buf;
+ buf[0].d[0] = byterev_8(reg[1].d[1]);
+ buf[0].d[1] = byterev_8(reg[1].d[0]);
+ buf[1].d[0] = byterev_8(reg[0].d[1]);
+ buf[1].d[1] = byterev_8(reg[0].d[0]);
+ reg = buf;
}
memcpy(mem, reg, size);
break;
@@ -834,9 +835,9 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
rev = !rev;
if (rev) {
/* reverse 16 bytes */
- buf.d[0] = byterev_8(reg->d[1]);
- buf.d[1] = byterev_8(reg->d[0]);
- reg = &buf;
+ buf[0].d[0] = byterev_8(reg[0].d[1]);
+ buf[0].d[1] = byterev_8(reg[0].d[0]);
+ reg = buf;
}
memcpy(mem, reg, size);
break;
@@ -845,15 +846,15 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
write_size = (size >= 8) ? 8 : size;
i = IS_LE ? 8 : 8 - write_size;
if (size < 8 && op->vsx_flags & VSX_FPCONV) {
- buf.d[0] = buf.d[1] = 0;
+ buf[0].d[0] = buf[0].d[1] = 0;
preempt_disable();
- conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
+ conv_dp_to_sp(&reg[0].dp[IS_LE], &buf[0].fp[1 + IS_LE]);
preempt_enable();
- reg = &buf;
+ reg = buf;
}
- memcpy(mem, &reg->b[i], write_size);
+ memcpy(mem, &reg[0].b[i], write_size);
if (size == 16)
- memcpy(mem + 8, &reg->d[IS_BE], 8);
+ memcpy(mem + 8, &reg[0].d[IS_BE], 8);
if (unlikely(rev)) {
do_byte_reverse(mem, write_size);
if (size == 16)
@@ -865,7 +866,7 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
wp = mem;
for (j = 0; j < size / 4; ++j) {
i = IS_LE ? 3 - j : j;
- *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
+ *wp++ = !rev ? reg[0].w[i] : byterev_4(reg[0].w[i]);
}
break;
case 2:
@@ -873,7 +874,7 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
hp = mem;
for (j = 0; j < size / 2; ++j) {
i = IS_LE ? 7 - j : j;
- *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
+ *hp++ = !rev ? reg[0].h[i] : byterev_2(reg[0].h[i]);
}
break;
case 1:
@@ -881,7 +882,7 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
bp = mem;
for (j = 0; j < size; ++j) {
i = IS_LE ? 15 - j : j;
- *bp++ = reg->b[i];
+ *bp++ = reg[0].b[i];
}
break;
}
--
2.26.2