mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-04 16:23:55 -06:00
target/riscv: Add Zvbb ISA extension support
This commit adds support for the Zvbb vector-crypto extension, which consists of the following instructions: * vrol.[vv,vx] * vror.[vv,vx,vi] * vbrev8.v * vrev8.v * vandn.[vv,vx] * vbrev.v * vclz.v * vctz.v * vcpop.v * vwsll.[vv,vx,vi] Translation functions are defined in `target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in `target/riscv/vcrypto_helper.c`. Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk> Co-authored-by: William Salmon <will.salmon@codethink.co.uk> Co-authored-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk> [max.chou@sifive.com: Fix imm mode of vror.vi] Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk> Signed-off-by: William Salmon <will.salmon@codethink.co.uk> Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk> Signed-off-by: Dickon Hood <dickon.hood@codethink.co.uk> Signed-off-by: Max Chou <max.chou@sifive.com> Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com> [max.chou@sifive.com: Exposed x-zvbb property] Message-ID: <20230711165917.2629866-9-max.chou@sifive.com> Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
This commit is contained in:
parent
2152e48b50
commit
0602847289
6 changed files with 397 additions and 0 deletions
|
@ -20,6 +20,7 @@
|
|||
#include "qemu/osdep.h"
|
||||
#include "qemu/host-utils.h"
|
||||
#include "qemu/bitops.h"
|
||||
#include "qemu/bswap.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/memop.h"
|
||||
#include "exec/exec-all.h"
|
||||
|
@ -57,3 +58,140 @@ RVVCALL(OPIVV2, vclmulh_vv, OP_UUU_D, H8, H8, H8, clmulh64)
|
|||
/*
 * Zvbc carry-less multiply high (tail of the pre-existing block):
 * vector-vector wrapper, then the vector-scalar form and its wrapper.
 * clmulh64 presumably comes from the vector-crypto helper code above.
 */
GEN_VEXT_VV(vclmulh_vv, 8)
RVVCALL(OPIVX2, vclmulh_vx, OP_UUU_D, H8, H8, clmulh64)
GEN_VEXT_VX(vclmulh_vx, 8)
|
||||
|
||||
/*
 * Zvbb vror.[vv,vx]: element-wise rotate right.
 * One RVVCALL per element width (8/16/32/64 bits); the ror8/ror16/ror32/
 * ror64 scalar helpers presumably come from "qemu/bitops.h" (included
 * above).  GEN_VEXT_VV/GEN_VEXT_VX emit the per-element vector loops;
 * the numeric argument is the element size in bytes.
 */
RVVCALL(OPIVV2, vror_vv_b, OP_UUU_B, H1, H1, H1, ror8)
RVVCALL(OPIVV2, vror_vv_h, OP_UUU_H, H2, H2, H2, ror16)
RVVCALL(OPIVV2, vror_vv_w, OP_UUU_W, H4, H4, H4, ror32)
RVVCALL(OPIVV2, vror_vv_d, OP_UUU_D, H8, H8, H8, ror64)
GEN_VEXT_VV(vror_vv_b, 1)
GEN_VEXT_VV(vror_vv_h, 2)
GEN_VEXT_VV(vror_vv_w, 4)
GEN_VEXT_VV(vror_vv_d, 8)

/* Scalar-operand (vx) forms of vror. */
RVVCALL(OPIVX2, vror_vx_b, OP_UUU_B, H1, H1, ror8)
RVVCALL(OPIVX2, vror_vx_h, OP_UUU_H, H2, H2, ror16)
RVVCALL(OPIVX2, vror_vx_w, OP_UUU_W, H4, H4, ror32)
RVVCALL(OPIVX2, vror_vx_d, OP_UUU_D, H8, H8, ror64)
GEN_VEXT_VX(vror_vx_b, 1)
GEN_VEXT_VX(vror_vx_h, 2)
GEN_VEXT_VX(vror_vx_w, 4)
GEN_VEXT_VX(vror_vx_d, 8)
|
||||
|
||||
/*
 * Zvbb vrol.[vv,vx]: element-wise rotate left.
 * Mirrors the vror block above, using the rol* scalar helpers
 * (presumably from "qemu/bitops.h").
 */
RVVCALL(OPIVV2, vrol_vv_b, OP_UUU_B, H1, H1, H1, rol8)
RVVCALL(OPIVV2, vrol_vv_h, OP_UUU_H, H2, H2, H2, rol16)
RVVCALL(OPIVV2, vrol_vv_w, OP_UUU_W, H4, H4, H4, rol32)
RVVCALL(OPIVV2, vrol_vv_d, OP_UUU_D, H8, H8, H8, rol64)
GEN_VEXT_VV(vrol_vv_b, 1)
GEN_VEXT_VV(vrol_vv_h, 2)
GEN_VEXT_VV(vrol_vv_w, 4)
GEN_VEXT_VV(vrol_vv_d, 8)

/* Scalar-operand (vx) forms of vrol. */
RVVCALL(OPIVX2, vrol_vx_b, OP_UUU_B, H1, H1, rol8)
RVVCALL(OPIVX2, vrol_vx_h, OP_UUU_H, H2, H2, rol16)
RVVCALL(OPIVX2, vrol_vx_w, OP_UUU_W, H4, H4, rol32)
RVVCALL(OPIVX2, vrol_vx_d, OP_UUU_D, H8, H8, rol64)
GEN_VEXT_VX(vrol_vx_b, 1)
GEN_VEXT_VX(vrol_vx_h, 2)
GEN_VEXT_VX(vrol_vx_w, 4)
GEN_VEXT_VX(vrol_vx_d, 8)
|
||||
|
||||
/*
 * Reverse the bit order inside each byte of a 64-bit value, operating on
 * all eight bytes in parallel (SWAR).  A bit at position i within a byte
 * moves to position 7 - i; bytes themselves stay in place.  The three
 * swap stages commute, so they are applied widest-first here: nibbles,
 * then bit-pairs, then adjacent bits.
 */
static uint64_t brev8(uint64_t val)
{
    const uint64_t nib_lo  = 0x0F0F0F0F0F0F0F0Full; /* low nibble of each byte */
    const uint64_t pair_lo = 0x3333333333333333ull; /* low bit-pair of each pair */
    const uint64_t bit_lo  = 0x5555555555555555ull; /* even-numbered bits */

    val = ((val & nib_lo) << 4) | ((val >> 4) & nib_lo);
    val = ((val & pair_lo) << 2) | ((val >> 2) & pair_lo);
    val = ((val & bit_lo) << 1) | ((val >> 1) & bit_lo);

    return val;
}
|
||||
|
||||
/*
 * Zvbb vbrev8.v: reverse the bits within each byte of every element.
 * All element widths reuse the 64-bit brev8() helper above; it is
 * byte-parallel, so the unused upper bytes of narrow elements are
 * harmless.
 */
RVVCALL(OPIVV1, vbrev8_v_b, OP_UU_B, H1, H1, brev8)
RVVCALL(OPIVV1, vbrev8_v_h, OP_UU_H, H2, H2, brev8)
RVVCALL(OPIVV1, vbrev8_v_w, OP_UU_W, H4, H4, brev8)
RVVCALL(OPIVV1, vbrev8_v_d, OP_UU_D, H8, H8, brev8)
GEN_VEXT_V(vbrev8_v_b, 1)
GEN_VEXT_V(vbrev8_v_h, 2)
GEN_VEXT_V(vbrev8_v_w, 4)
GEN_VEXT_V(vbrev8_v_d, 8)
|
||||
|
||||
/*
 * Zvbb vrev8.v: reverse the byte order within each element.
 * For 8-bit elements byte reversal is a no-op, hence DO_IDENTITY;
 * wider elements use the bswap* helpers (presumably from
 * "qemu/bswap.h", included above).
 */
#define DO_IDENTITY(a) (a)
RVVCALL(OPIVV1, vrev8_v_b, OP_UU_B, H1, H1, DO_IDENTITY)
RVVCALL(OPIVV1, vrev8_v_h, OP_UU_H, H2, H2, bswap16)
RVVCALL(OPIVV1, vrev8_v_w, OP_UU_W, H4, H4, bswap32)
RVVCALL(OPIVV1, vrev8_v_d, OP_UU_D, H8, H8, bswap64)
GEN_VEXT_V(vrev8_v_b, 1)
GEN_VEXT_V(vrev8_v_h, 2)
GEN_VEXT_V(vrev8_v_w, 4)
GEN_VEXT_V(vrev8_v_d, 8)
|
||||
|
||||
/*
 * Zvbb vandn.[vv,vx]: bitwise AND with one operand complemented.
 * NOTE(review): which operand DO_ANDN complements (a vs b) depends on
 * the argument order the OPIVV2/OPIVX2 macros expand to — confirm it
 * matches the Zvbb rule vd = vs2 & ~vs1.
 */
#define DO_ANDN(a, b) ((a) & ~(b))
RVVCALL(OPIVV2, vandn_vv_b, OP_UUU_B, H1, H1, H1, DO_ANDN)
RVVCALL(OPIVV2, vandn_vv_h, OP_UUU_H, H2, H2, H2, DO_ANDN)
RVVCALL(OPIVV2, vandn_vv_w, OP_UUU_W, H4, H4, H4, DO_ANDN)
RVVCALL(OPIVV2, vandn_vv_d, OP_UUU_D, H8, H8, H8, DO_ANDN)
GEN_VEXT_VV(vandn_vv_b, 1)
GEN_VEXT_VV(vandn_vv_h, 2)
GEN_VEXT_VV(vandn_vv_w, 4)
GEN_VEXT_VV(vandn_vv_d, 8)

/* Scalar-operand (vx) forms of vandn. */
RVVCALL(OPIVX2, vandn_vx_b, OP_UUU_B, H1, H1, DO_ANDN)
RVVCALL(OPIVX2, vandn_vx_h, OP_UUU_H, H2, H2, DO_ANDN)
RVVCALL(OPIVX2, vandn_vx_w, OP_UUU_W, H4, H4, DO_ANDN)
RVVCALL(OPIVX2, vandn_vx_d, OP_UUU_D, H8, H8, DO_ANDN)
GEN_VEXT_VX(vandn_vx_b, 1)
GEN_VEXT_VX(vandn_vx_h, 2)
GEN_VEXT_VX(vandn_vx_w, 4)
GEN_VEXT_VX(vandn_vx_d, 8)
|
||||
|
||||
/*
 * Zvbb vbrev.v: reverse the full bit order of each element.
 * The width-specific revbit* scalar helpers presumably come from
 * "qemu/host-utils.h" (included above).
 */
RVVCALL(OPIVV1, vbrev_v_b, OP_UU_B, H1, H1, revbit8)
RVVCALL(OPIVV1, vbrev_v_h, OP_UU_H, H2, H2, revbit16)
RVVCALL(OPIVV1, vbrev_v_w, OP_UU_W, H4, H4, revbit32)
RVVCALL(OPIVV1, vbrev_v_d, OP_UU_D, H8, H8, revbit64)
GEN_VEXT_V(vbrev_v_b, 1)
GEN_VEXT_V(vbrev_v_h, 2)
GEN_VEXT_V(vbrev_v_w, 4)
GEN_VEXT_V(vbrev_v_d, 8)
|
||||
|
||||
/*
 * Zvbb vclz.v: count leading zeros in each element, using the
 * width-specific clz* helpers (presumably from "qemu/host-utils.h").
 */
RVVCALL(OPIVV1, vclz_v_b, OP_UU_B, H1, H1, clz8)
RVVCALL(OPIVV1, vclz_v_h, OP_UU_H, H2, H2, clz16)
RVVCALL(OPIVV1, vclz_v_w, OP_UU_W, H4, H4, clz32)
RVVCALL(OPIVV1, vclz_v_d, OP_UU_D, H8, H8, clz64)
GEN_VEXT_V(vclz_v_b, 1)
GEN_VEXT_V(vclz_v_h, 2)
GEN_VEXT_V(vclz_v_w, 4)
GEN_VEXT_V(vclz_v_d, 8)
|
||||
|
||||
/*
 * Zvbb vctz.v: count trailing zeros in each element, using the
 * width-specific ctz* helpers (presumably from "qemu/host-utils.h").
 */
RVVCALL(OPIVV1, vctz_v_b, OP_UU_B, H1, H1, ctz8)
RVVCALL(OPIVV1, vctz_v_h, OP_UU_H, H2, H2, ctz16)
RVVCALL(OPIVV1, vctz_v_w, OP_UU_W, H4, H4, ctz32)
RVVCALL(OPIVV1, vctz_v_d, OP_UU_D, H8, H8, ctz64)
GEN_VEXT_V(vctz_v_b, 1)
GEN_VEXT_V(vctz_v_h, 2)
GEN_VEXT_V(vctz_v_w, 4)
GEN_VEXT_V(vctz_v_d, 8)
|
||||
|
||||
/*
 * Zvbb vcpop.v: population count (number of set bits) per element,
 * using the width-specific ctpop* helpers (presumably from
 * "qemu/host-utils.h").
 */
RVVCALL(OPIVV1, vcpop_v_b, OP_UU_B, H1, H1, ctpop8)
RVVCALL(OPIVV1, vcpop_v_h, OP_UU_H, H2, H2, ctpop16)
RVVCALL(OPIVV1, vcpop_v_w, OP_UU_W, H4, H4, ctpop32)
RVVCALL(OPIVV1, vcpop_v_d, OP_UU_D, H8, H8, ctpop64)
GEN_VEXT_V(vcpop_v_b, 1)
GEN_VEXT_V(vcpop_v_h, 2)
GEN_VEXT_V(vcpop_v_w, 4)
GEN_VEXT_V(vcpop_v_d, 8)
|
||||
|
||||
/*
 * Shift-left helper for the widening vwsll instructions.  The shift
 * amount is masked to the bit width of N's type; N is the already
 * widened (2*SEW) left operand, which matches Zvbb's rule that vwsll
 * uses log2(2*SEW) shift-amount bits.
 * Fix: fully parenthesize both arguments so the macro stays hygienic
 * when expanded with compound expressions (e.g. DO_SLL(a | b, m + 1)).
 */
#define DO_SLL(N, M) ((N) << ((M) & (sizeof(N) * 8 - 1)))
|
||||
/*
 * Zvbb vwsll.[vv,vx]: widening shift-left logical.  The destination
 * element is twice the source width, so only b/h/w source variants
 * exist (no d), and the GEN_VEXT_* size argument (2/4/8) is the
 * *destination* element size in bytes.
 */
RVVCALL(OPIVV2, vwsll_vv_b, WOP_UUU_B, H2, H1, H1, DO_SLL)
RVVCALL(OPIVV2, vwsll_vv_h, WOP_UUU_H, H4, H2, H2, DO_SLL)
RVVCALL(OPIVV2, vwsll_vv_w, WOP_UUU_W, H8, H4, H4, DO_SLL)
GEN_VEXT_VV(vwsll_vv_b, 2)
GEN_VEXT_VV(vwsll_vv_h, 4)
GEN_VEXT_VV(vwsll_vv_w, 8)

/* Scalar-operand (vx) forms of vwsll. */
RVVCALL(OPIVX2, vwsll_vx_b, WOP_UUU_B, H2, H1, DO_SLL)
RVVCALL(OPIVX2, vwsll_vx_h, WOP_UUU_H, H4, H2, DO_SLL)
RVVCALL(OPIVX2, vwsll_vx_w, WOP_UUU_W, H8, H4, DO_SLL)
GEN_VEXT_VX(vwsll_vx_b, 2)
GEN_VEXT_VX(vwsll_vx_h, 4)
GEN_VEXT_VX(vwsll_vx_w, 8)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue