target/riscv: add vector stride load and store instructions

Vector strided operations access the first memory element at the base address,
and then access subsequent elements at address increments given by the byte
offset contained in the x register specified by rs2.

Vector unit-stride operations access elements stored contiguously in memory,
starting from the base effective address. They can be seen as a special
case of strided operations.
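
As an illustration, here is a minimal sketch (not part of the patch) of
the effective address each element access uses, assuming base is taken
from the register specified by rs1 and the byte stride from rs2:

    #include <stdint.h>

    /* Effective address of element i of a strided access. */
    static inline uint64_t strided_elem_addr(uint64_t base, uint64_t stride,
                                             uint64_t i)
    {
        return base + i * stride;
    }

    /*
     * Unit-stride is the special case where the stride equals the
     * element size in bytes (SEW / 8).
     */
    static inline uint64_t unit_stride_elem_addr(uint64_t base, uint64_t esz,
                                                 uint64_t i)
    {
        return strided_elem_addr(base, esz, i);
    }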

Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20200701152549.1218-7-zhiwei_liu@c-sky.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>

@@ -15,6 +15,9 @@
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
{
@@ -77,3 +80,355 @@ static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
tcg_temp_free(dst);
return true;
}
/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
}
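/*
 * For example, with s->vlen = 128 each vector register spans
 * 128 / 8 = 16 bytes, so v2 begins 32 bytes into vreg[].
 */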
/* check functions */
/*
 * cpu_get_tb_cpu_state() sets VILL when RVV is not present, so checking
 * vill here also verifies that RVV is enabled.
 */
static bool vext_check_isa_ill(DisasContext *s)
{
return !s->vill;
}
/*
 * Two rules are checked here:
*
* 1. Vector register numbers are multiples of LMUL. (Section 3.2)
*
* 2. For all widening instructions, the destination LMUL value must also be
* a supported LMUL value. (Section 11.2)
*/
static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen)
{
/*
* The destination vector register group results are arranged as if both
* SEW and LMUL were at twice their current settings. (Section 11.2).
*/
int legal = widen ? 2 << s->lmul : 1 << s->lmul;
return !((s->lmul == 0x3 && widen) || (reg % legal));
}
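/*
 * For example, with LMUL = 2 (s->lmul = 1) a vector register operand
 * must be even-numbered; a widening destination acts as LMUL = 4, so
 * its register number must be a multiple of 4, and widening with
 * LMUL = 8 (s->lmul = 3) is always rejected.
 */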
/*
 * Two rules are checked here:
*
* 1. The destination vector register group for a masked vector instruction can
* only overlap the source mask register (v0) when LMUL=1. (Section 5.3)
*
 * 2. For widening instructions and some others, such as vslideup.vx, the
 *    LMUL=1 exception does not apply; callers set 'force' to skip it.
*/
static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm,
bool force)
{
return (vm != 0 || vd != 0) || (!force && (s->lmul == 0));
}
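/*
 * For example, a masked instruction (vm = 0) with vd = v0 passes only
 * when LMUL = 1 (s->lmul = 0); with 'force' set (widening instructions,
 * vslideup.vx, ...) that exception is dropped and vd must not be v0.
 */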
/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */
static bool vext_check_nf(DisasContext *s, uint32_t nf)
{
return (1 << s->lmul) * nf <= 8;
}
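/*
 * For example, LMUL = 2 allows NFIELDS up to 4: a 4-field segment
 * operation then occupies 8 vector registers.
 */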
/* common translation macro */
#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
{ \
if (CHECK(s, a)) { \
return OP(s, a, SEQ); \
} \
return false; \
}
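/*
 * For instance, GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
 * below expands to:
 *
 *     static bool trans_vlb_v(DisasContext *s, arg_r2nfvm *a)
 *     {
 *         if (ld_us_check(s, a)) {
 *             return ld_us_op(s, a, 0);
 *         }
 *         return false;
 *     }
 */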
/*
*** unit stride load and store
*/
typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
TCGv_env, TCGv_i32);
static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
gen_helper_ldst_us *fn, DisasContext *s)
{
TCGv_ptr dest, mask;
TCGv base;
TCGv_i32 desc;
TCGLabel *over = gen_new_label();
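/* If vl == 0 there are no active elements; branch over the helper call. */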
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
base = tcg_temp_new();
/*
 * simd_desc() supports at most 256 bytes, while in this implementation
 * the maximum vector group length is 2048 bytes (a full LMUL = 8 group
 * at VLEN = 2048 bits), so the description is split into two parts:
 *
 * The first part is vlen in bytes, encoded in the maxsz of simd_desc.
 * The second part is lmul, encoded in the data of simd_desc.
 */
desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
gen_get_gpr(base, rs1);
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
fn(dest, mask, base, cpu_env, desc);
tcg_temp_free_ptr(dest);
tcg_temp_free_ptr(mask);
tcg_temp_free(base);
tcg_temp_free_i32(desc);
gen_set_label(over);
return true;
}
static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_us *fn;
static gen_helper_ldst_us * const fns[2][7][4] = {
/* masked unit stride load */
{ { gen_helper_vlb_v_b_mask, gen_helper_vlb_v_h_mask,
gen_helper_vlb_v_w_mask, gen_helper_vlb_v_d_mask },
{ NULL, gen_helper_vlh_v_h_mask,
gen_helper_vlh_v_w_mask, gen_helper_vlh_v_d_mask },
{ NULL, NULL,
gen_helper_vlw_v_w_mask, gen_helper_vlw_v_d_mask },
{ gen_helper_vle_v_b_mask, gen_helper_vle_v_h_mask,
gen_helper_vle_v_w_mask, gen_helper_vle_v_d_mask },
{ gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask,
gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask },
{ NULL, gen_helper_vlhu_v_h_mask,
gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask },
{ NULL, NULL,
gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } },
/* unmasked unit stride load */
{ { gen_helper_vlb_v_b, gen_helper_vlb_v_h,
gen_helper_vlb_v_w, gen_helper_vlb_v_d },
{ NULL, gen_helper_vlh_v_h,
gen_helper_vlh_v_w, gen_helper_vlh_v_d },
{ NULL, NULL,
gen_helper_vlw_v_w, gen_helper_vlw_v_d },
{ gen_helper_vle_v_b, gen_helper_vle_v_h,
gen_helper_vle_v_w, gen_helper_vle_v_d },
{ gen_helper_vlbu_v_b, gen_helper_vlbu_v_h,
gen_helper_vlbu_v_w, gen_helper_vlbu_v_d },
{ NULL, gen_helper_vlhu_v_h,
gen_helper_vlhu_v_w, gen_helper_vlhu_v_d },
{ NULL, NULL,
gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } }
};
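/*
 * fns is indexed as [vm][seq][sew].  NULL entries are combinations the
 * spec reserves, where the memory element is wider than SEW (e.g. vlh
 * with SEW = 8).
 */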
fn = fns[a->vm][seq][s->sew];
if (fn == NULL) {
return false;
}
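/* Pack the parameters the helper needs (mlen, vm, lmul, nf) into desc data. */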
data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldst_us_trans(a->rd, a->rs1, data, fn, s);
}
static bool ld_us_check(DisasContext *s, arg_r2nfvm* a)
{
return (vext_check_isa_ill(s) &&
vext_check_overlap_mask(s, a->rd, a->vm, false) &&
vext_check_reg(s, a->rd, false) &&
vext_check_nf(s, a->nf));
}
GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check)
static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_us *fn;
static gen_helper_ldst_us * const fns[2][4][4] = {
/* masked unit stride store */
{ { gen_helper_vsb_v_b_mask, gen_helper_vsb_v_h_mask,
gen_helper_vsb_v_w_mask, gen_helper_vsb_v_d_mask },
{ NULL, gen_helper_vsh_v_h_mask,
gen_helper_vsh_v_w_mask, gen_helper_vsh_v_d_mask },
{ NULL, NULL,
gen_helper_vsw_v_w_mask, gen_helper_vsw_v_d_mask },
{ gen_helper_vse_v_b_mask, gen_helper_vse_v_h_mask,
gen_helper_vse_v_w_mask, gen_helper_vse_v_d_mask } },
/* unmasked unit stride store */
{ { gen_helper_vsb_v_b, gen_helper_vsb_v_h,
gen_helper_vsb_v_w, gen_helper_vsb_v_d },
{ NULL, gen_helper_vsh_v_h,
gen_helper_vsh_v_w, gen_helper_vsh_v_d },
{ NULL, NULL,
gen_helper_vsw_v_w, gen_helper_vsw_v_d },
{ gen_helper_vse_v_b, gen_helper_vse_v_h,
gen_helper_vse_v_w, gen_helper_vse_v_d } }
};
fn = fns[a->vm][seq][s->sew];
if (fn == NULL) {
return false;
}
data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldst_us_trans(a->rd, a->rs1, data, fn, s);
}
static bool st_us_check(DisasContext *s, arg_r2nfvm* a)
{
return (vext_check_isa_ill(s) &&
vext_check_reg(s, a->rd, false) &&
vext_check_nf(s, a->nf));
}
GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check)
/*
*** stride load and store
*/
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
TCGv, TCGv_env, TCGv_i32);
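/* Like gen_helper_ldst_us, plus one extra TCGv carrying the byte stride. */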
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
uint32_t data, gen_helper_ldst_stride *fn,
DisasContext *s)
{
TCGv_ptr dest, mask;
TCGv base, stride;
TCGv_i32 desc;
TCGLabel *over = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
base = tcg_temp_new();
stride = tcg_temp_new();
desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
gen_get_gpr(base, rs1);
gen_get_gpr(stride, rs2);
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
fn(dest, mask, base, stride, cpu_env, desc);
tcg_temp_free_ptr(dest);
tcg_temp_free_ptr(mask);
tcg_temp_free(base);
tcg_temp_free(stride);
tcg_temp_free_i32(desc);
gen_set_label(over);
return true;
}
static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
static gen_helper_ldst_stride * const fns[7][4] = {
{ gen_helper_vlsb_v_b, gen_helper_vlsb_v_h,
gen_helper_vlsb_v_w, gen_helper_vlsb_v_d },
{ NULL, gen_helper_vlsh_v_h,
gen_helper_vlsh_v_w, gen_helper_vlsh_v_d },
{ NULL, NULL,
gen_helper_vlsw_v_w, gen_helper_vlsw_v_d },
{ gen_helper_vlse_v_b, gen_helper_vlse_v_h,
gen_helper_vlse_v_w, gen_helper_vlse_v_d },
{ gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h,
gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d },
{ NULL, gen_helper_vlshu_v_h,
gen_helper_vlshu_v_w, gen_helper_vlshu_v_d },
{ NULL, NULL,
gen_helper_vlswu_v_w, gen_helper_vlswu_v_d },
};
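/*
 * Note there are no separate _mask variants here: the stride helpers
 * always receive the mask, with vm carried in the descriptor data.
 */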
fn = fns[seq][s->sew];
if (fn == NULL) {
return false;
}
data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a)
{
return (vext_check_isa_ill(s) &&
vext_check_overlap_mask(s, a->rd, a->vm, false) &&
vext_check_reg(s, a->rd, false) &&
vext_check_nf(s, a->nf));
}
GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlswu_v, 6, rnfvm, ld_stride_op, ld_stride_check)
static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
static gen_helper_ldst_stride * const fns[4][4] = {
/* stride store */
{ gen_helper_vssb_v_b, gen_helper_vssb_v_h,
gen_helper_vssb_v_w, gen_helper_vssb_v_d },
{ NULL, gen_helper_vssh_v_h,
gen_helper_vssh_v_w, gen_helper_vssh_v_d },
{ NULL, NULL,
gen_helper_vssw_v_w, gen_helper_vssw_v_d },
{ gen_helper_vsse_v_b, gen_helper_vsse_v_h,
gen_helper_vsse_v_w, gen_helper_vsse_v_d }
};
data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
fn = fns[seq][s->sew];
if (fn == NULL) {
return false;
}
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}
static bool st_stride_check(DisasContext *s, arg_rnfvm* a)
{
return (vext_check_isa_ill(s) &&
vext_check_reg(s, a->rd, false) &&
vext_check_nf(s, a->nf));
}
GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check)