Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-05 08:43:55 -06:00)
target/riscv: vector single-width integer multiply instructions
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20200701152549.1218-19-zhiwei_liu@c-sky.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
This commit is contained in:
parent 558fa7797c
commit 958b85f368
4 changed files with 214 additions and 0 deletions
@@ -859,6 +859,10 @@ GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl)
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t

/* operation of two vector elements */
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
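The RVVCALL/OPIVV2 generators used below are defined earlier in vector_helper.c and are not part of this hunk. As a rough, self-contained sketch of the pattern (the macro body and the H1 index helper here are simplified stand-ins, not the exact QEMU definitions), each OP_* macro supplies a five-type tuple: destination type, the two source element types, and the two types the operation is evaluated in:

#include <stdint.h>
#include <stdio.h>

#define H1(x) (x)   /* stand-in for QEMU's host-endian index helper */

/* Simplified shape of OPIVV2: stamp out a static per-element
 * function do_NAME that loads element i of each source vector,
 * applies OP, and stores the result.  TD/T1/T2 are the element
 * types, TX1/TX2 the types the operation is evaluated in. */
#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
{                                                            \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                          \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                          \
    *((TD *)vd + HD(i)) = OP(s2, s1);                        \
}

#define RVVCALL(macro, ...) macro(__VA_ARGS__)
#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t
#define DO_MUL(N, M) (N * M)

RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL)

int main(void)
{
    int8_t vs1[2] = { -3, 7 }, vs2[2] = { 5, 9 }, vd[2];
    for (int i = 0; i < 2; i++) {
        do_vmul_vv_b(vd, vs1, vs2, i);
    }
    printf("%d %d\n", vd[0], vd[1]);    /* prints: -15 63 */
    return 0;
}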
@@ -1603,3 +1607,162 @@ GEN_VEXT_VX(vmax_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmax_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmax_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmax_vx_d, 8, 8, clearq)

/* Vector Single-Width Integer Multiply Instructions */
#define DO_MUL(N, M) (N * M)
RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL)
RVVCALL(OPIVV2, vmul_vv_h, OP_SSS_H, H2, H2, H2, DO_MUL)
RVVCALL(OPIVV2, vmul_vv_w, OP_SSS_W, H4, H4, H4, DO_MUL)
RVVCALL(OPIVV2, vmul_vv_d, OP_SSS_D, H8, H8, H8, DO_MUL)
GEN_VEXT_VV(vmul_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmul_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmul_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmul_vv_d, 8, 8, clearq)
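GEN_VEXT_VV, also defined earlier in the file, emits the helper entry point that the translator calls; it runs the per-element function over the active elements and hands the tail to the given clear function (clearb/clearh/clearl/clearq by element size). A stripped-down sketch of that control flow, with the mask handling and CPURISCVState plumbing of the real helper omitted and all names illustrative:

#include <stdint.h>
#include <string.h>

typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);

/* Illustrative skeleton only: apply fn to elements [0, vl), then
 * zero the tail elements [vl, vlmax), as the clear functions do. */
static void vext_vv_sketch(void *vd, void *vs1, void *vs2,
                           uint32_t vl, uint32_t vlmax,
                           uint32_t esz, opivv2_fn *fn)
{
    for (uint32_t i = 0; i < vl; i++) {
        fn(vd, vs1, vs2, i);    /* e.g. do_vmul_vv_b */
    }
    memset((char *)vd + vl * esz, 0, (size_t)(vlmax - vl) * esz);
}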

static int8_t do_mulh_b(int8_t s2, int8_t s1)
{
    return (int16_t)s2 * (int16_t)s1 >> 8;
}

static int16_t do_mulh_h(int16_t s2, int16_t s1)
{
    return (int32_t)s2 * (int32_t)s1 >> 16;
}

static int32_t do_mulh_w(int32_t s2, int32_t s1)
{
    return (int64_t)s2 * (int64_t)s1 >> 32;
}

static int64_t do_mulh_d(int64_t s2, int64_t s1)
{
    uint64_t hi_64, lo_64;

    muls64(&lo_64, &hi_64, s1, s2);
    return hi_64;
}
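For the byte/half/word cases, the high half of the product comes from widening both operands, multiplying, and shifting the double-width result back down; only the 64-bit case needs QEMU's muls64/mulu64 host-utils helpers, which return the full 128-bit product in two halves. A quick check of the widening technique at byte width (not part of the patch; like the code above, it relies on >> being an arithmetic shift on signed values):

#include <stdint.h>
#include <assert.h>

/* Same technique as do_mulh_b: widen both operands, take the full
 * product, keep the upper half via an arithmetic shift. */
static int8_t mulh8(int8_t s2, int8_t s1)
{
    return (int16_t)s2 * (int16_t)s1 >> 8;
}

int main(void)
{
    /* -128 * -128 = 16384 = 0x4000; high byte 0x40 = 64. */
    assert(mulh8(-128, -128) == 64);
    /* 100 * -100 = -10000 = 0xD8F0 as 16 bits; high byte 0xD8
     * reads as -40 when interpreted as int8_t. */
    assert(mulh8(100, -100) == -40);
    return 0;
}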

static uint8_t do_mulhu_b(uint8_t s2, uint8_t s1)
{
    return (uint16_t)s2 * (uint16_t)s1 >> 8;
}

static uint16_t do_mulhu_h(uint16_t s2, uint16_t s1)
{
    return (uint32_t)s2 * (uint32_t)s1 >> 16;
}

static uint32_t do_mulhu_w(uint32_t s2, uint32_t s1)
{
    return (uint64_t)s2 * (uint64_t)s1 >> 32;
}

static uint64_t do_mulhu_d(uint64_t s2, uint64_t s1)
{
    uint64_t hi_64, lo_64;

    mulu64(&lo_64, &hi_64, s2, s1);
    return hi_64;
}

static int8_t do_mulhsu_b(int8_t s2, uint8_t s1)
{
    return (int16_t)s2 * (uint16_t)s1 >> 8;
}

static int16_t do_mulhsu_h(int16_t s2, uint16_t s1)
{
    return (int32_t)s2 * (uint32_t)s1 >> 16;
}

static int32_t do_mulhsu_w(int32_t s2, uint32_t s1)
{
    return (int64_t)s2 * (uint64_t)s1 >> 32;
}

/*
 * Let  A = signed operand,
 *      B = unsigned operand
 *      P = mulu64(A, B), unsigned product
 *
 * LET  X = 2 ** 64 - A, 2's complement of A
 *      SP = signed product
 * THEN
 *      IF A < 0
 *          SP = -X * B
 *             = -(2 ** 64 - A) * B
 *             = A * B - 2 ** 64 * B
 *             = P - 2 ** 64 * B
 *      ELSE
 *          SP = P
 * THEN
 *      HI_P -= (A < 0 ? B : 0)
 */

static int64_t do_mulhsu_d(int64_t s2, uint64_t s1)
{
    uint64_t hi_64, lo_64;

    mulu64(&lo_64, &hi_64, s2, s1);

    hi_64 -= s2 < 0 ? s1 : 0;
    return hi_64;
}
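The fix-up in do_mulhsu_d can be sanity-checked at a narrower width, where direct widening arithmetic is still available for comparison. A hypothetical 8-bit analogue (not part of the patch) that multiplies the raw bit patterns as unsigned and then applies the same hi -= (s2 < 0 ? s1 : 0) correction:

#include <stdint.h>
#include <assert.h>

/* 8-bit analogue of do_mulhsu_d: unsigned product of the bit
 * patterns, then subtract the unsigned operand from the high half
 * when the signed operand is negative. */
static int8_t mulhsu8(int8_t s2, uint8_t s1)
{
    uint16_t p = (uint16_t)(uint8_t)s2 * s1;  /* unsigned product */
    uint8_t hi = p >> 8;
    hi -= (s2 < 0) ? s1 : 0;
    return (int8_t)hi;
}

int main(void)
{
    /* -2 * 200 = -400 = 0xFE70 as 16 bits; high byte 0xFE = -2. */
    assert(mulhsu8(-2, 200) == -2);
    /* Cross-check against direct widening arithmetic. */
    assert(mulhsu8(-2, 200) == (int8_t)((-2 * 200) >> 8));
    return 0;
}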

RVVCALL(OPIVV2, vmulh_vv_b, OP_SSS_B, H1, H1, H1, do_mulh_b)
RVVCALL(OPIVV2, vmulh_vv_h, OP_SSS_H, H2, H2, H2, do_mulh_h)
RVVCALL(OPIVV2, vmulh_vv_w, OP_SSS_W, H4, H4, H4, do_mulh_w)
RVVCALL(OPIVV2, vmulh_vv_d, OP_SSS_D, H8, H8, H8, do_mulh_d)
RVVCALL(OPIVV2, vmulhu_vv_b, OP_UUU_B, H1, H1, H1, do_mulhu_b)
RVVCALL(OPIVV2, vmulhu_vv_h, OP_UUU_H, H2, H2, H2, do_mulhu_h)
RVVCALL(OPIVV2, vmulhu_vv_w, OP_UUU_W, H4, H4, H4, do_mulhu_w)
RVVCALL(OPIVV2, vmulhu_vv_d, OP_UUU_D, H8, H8, H8, do_mulhu_d)
RVVCALL(OPIVV2, vmulhsu_vv_b, OP_SUS_B, H1, H1, H1, do_mulhsu_b)
RVVCALL(OPIVV2, vmulhsu_vv_h, OP_SUS_H, H2, H2, H2, do_mulhsu_h)
RVVCALL(OPIVV2, vmulhsu_vv_w, OP_SUS_W, H4, H4, H4, do_mulhsu_w)
RVVCALL(OPIVV2, vmulhsu_vv_d, OP_SUS_D, H8, H8, H8, do_mulhsu_d)
GEN_VEXT_VV(vmulh_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmulh_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmulh_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmulh_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vmulhu_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmulhu_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmulhu_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmulhu_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vmulhsu_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmulhsu_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmulhsu_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmulhsu_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2, vmul_vx_b, OP_SSS_B, H1, H1, DO_MUL)
RVVCALL(OPIVX2, vmul_vx_h, OP_SSS_H, H2, H2, DO_MUL)
RVVCALL(OPIVX2, vmul_vx_w, OP_SSS_W, H4, H4, DO_MUL)
RVVCALL(OPIVX2, vmul_vx_d, OP_SSS_D, H8, H8, DO_MUL)
RVVCALL(OPIVX2, vmulh_vx_b, OP_SSS_B, H1, H1, do_mulh_b)
RVVCALL(OPIVX2, vmulh_vx_h, OP_SSS_H, H2, H2, do_mulh_h)
RVVCALL(OPIVX2, vmulh_vx_w, OP_SSS_W, H4, H4, do_mulh_w)
RVVCALL(OPIVX2, vmulh_vx_d, OP_SSS_D, H8, H8, do_mulh_d)
RVVCALL(OPIVX2, vmulhu_vx_b, OP_UUU_B, H1, H1, do_mulhu_b)
RVVCALL(OPIVX2, vmulhu_vx_h, OP_UUU_H, H2, H2, do_mulhu_h)
RVVCALL(OPIVX2, vmulhu_vx_w, OP_UUU_W, H4, H4, do_mulhu_w)
RVVCALL(OPIVX2, vmulhu_vx_d, OP_UUU_D, H8, H8, do_mulhu_d)
RVVCALL(OPIVX2, vmulhsu_vx_b, OP_SUS_B, H1, H1, do_mulhsu_b)
RVVCALL(OPIVX2, vmulhsu_vx_h, OP_SUS_H, H2, H2, do_mulhsu_h)
RVVCALL(OPIVX2, vmulhsu_vx_w, OP_SUS_W, H4, H4, do_mulhsu_w)
RVVCALL(OPIVX2, vmulhsu_vx_d, OP_SUS_D, H8, H8, do_mulhsu_d)
GEN_VEXT_VX(vmul_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmul_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmul_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmul_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmulh_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmulh_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmulh_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmulh_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmulhu_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmulhu_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmulhu_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmulhu_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmulhsu_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmulhsu_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmulhsu_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmulhsu_vx_d, 8, 8, clearq)
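The vx variants reuse the same do_* element functions through OPIVX2, which (like OPIVV2, it is defined earlier in the file and not shown in this hunk) takes the first operand as a scalar register value rather than a vector element, hence one fewer type/index parameter. An approximate sketch of its shape; in real QEMU the scalar arrives as a target_long:

/* Approximate shape of OPIVX2: like OPIVV2, but s1 is a scalar
 * broadcast to every element instead of being loaded per index. */
#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)    \
static void do_##NAME(void *vd, long s1, void *vs2, int i) \
{                                                          \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                        \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1);             \
}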