mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-05 08:43:55 -06:00
target/arm: Implement MVE VADDV
Implement the MVE VADDV insn, which performs an addition across vector lanes. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20210617121628.20116-44-peter.maydell@linaro.org
This commit is contained in:
parent
8625693ac4
commit
6f060a636b
4 changed files with 76 additions and 0 deletions
|
@@ -1134,3 +1134,27 @@ DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64
|
|||
|
||||
DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
|
||||
DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
|
||||
|
||||
/*
 * Vector add across vector: sum all predicated elements of the input
 * vector *vm into the 32-bit general-purpose accumulator ra, and
 * return the updated accumulator.
 *
 * OP    - helper name suffix (e.g. vaddvsb)
 * ESIZE - element size in bytes (1, 2 or 4)
 * TYPE  - element type; must be signed for the signed variants so
 *         that each element is sign-extended before the addition
 *
 * Elements whose predicate bit is clear (mask bit 0 for the element's
 * lowest byte) are skipped; accumulation wraps modulo 2^32.
 */
#define DO_VADDV(OP, ESIZE, TYPE)                               \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra)                \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        /* 16-byte vector: one mask bit per byte, ESIZE bits per element */ \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                ra += m[H##ESIZE(e)];                           \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }
|
||||
|
||||
/*
 * Signed variants must use signed element types so each element is
 * sign-extended into the 32-bit accumulator; with unsigned types a
 * byte element of 0xFF would add 255 instead of -1.
 */
DO_VADDV(vaddvsb, 1, int8_t)
DO_VADDV(vaddvsh, 2, int16_t)
DO_VADDV(vaddvsw, 4, int32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue