target/arm: Create gen_gvec_{sri,sli}
The functions eliminate duplication of the special cases for this
operation.  They match up with the GVecGen2iFn typedef.

Add out-of-line helpers.  We got away with only having inline
expanders because the neon vector size is only 16 bytes, and we
know that the inline expansion will always succeed.  When we reuse
this for SVE, tcg-gvec-op may decide to use an out-of-line helper
due to longer vector lengths.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200513163245.17915-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent 6ccd48d4ea
commit 893ab0542a

5 changed files with 160 additions and 101 deletions
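For reference before the diff: the commit message's point that the new functions "match up with the GVecGen2iFn typedef" means they can be stored and called through the same expander function-pointer type as the other gvec helpers. The sketch below shows roughly what that typedef looks like (it lives in target/arm/translate.h; parameter names here are illustrative, not copied from the header) together with the prototypes of the two entry points this patch adds.

/* Approximate shape of the expander typedef the new functions conform to. */
typedef void GVecGen2iFn(unsigned vece, uint32_t dofs, uint32_t aofs,
                         int64_t c, uint32_t oprsz, uint32_t maxsz);

/* The entry points added in this patch take exactly those parameters: */
void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);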
@@ -4454,47 +4454,62 @@ static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
 {
     if (sh == 0) {
         tcg_gen_mov_vec(d, a);
     } else {
         TCGv_vec t = tcg_temp_new_vec_matching(d);
         TCGv_vec m = tcg_temp_new_vec_matching(d);
 
         tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
         tcg_gen_shri_vec(vece, t, a, sh);
         tcg_gen_and_vec(vece, d, d, m);
         tcg_gen_or_vec(vece, d, d, t);
 
         tcg_temp_free_vec(t);
         tcg_temp_free_vec(m);
     }
 }
 
-static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };
-
-const GVecGen2i sri_op[4] = {
-    { .fni8 = gen_shr8_ins_i64,
-      .fniv = gen_shr_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sri,
-      .vece = MO_8 },
-    { .fni8 = gen_shr16_ins_i64,
-      .fniv = gen_shr_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sri,
-      .vece = MO_16 },
-    { .fni4 = gen_shr32_ins_i32,
-      .fniv = gen_shr_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sri,
-      .vece = MO_32 },
-    { .fni8 = gen_shr64_ins_i64,
-      .fniv = gen_shr_ins_vec,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .load_dest = true,
-      .opt_opc = vecop_list_sri,
-      .vece = MO_64 },
-};
+void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                  int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
+    const GVecGen2i ops[4] = {
+        { .fni8 = gen_shr8_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .fno = gen_helper_gvec_sri_b,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_shr16_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .fno = gen_helper_gvec_sri_h,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_shr32_ins_i32,
+          .fniv = gen_shr_ins_vec,
+          .fno = gen_helper_gvec_sri_s,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = gen_shr64_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .fno = gen_helper_gvec_sri_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [1..esize]. */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    /* Shift of esize leaves destination unchanged. */
+    if (shift < (8 << vece)) {
+        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    } else {
+        /* Nop, but we do need to clear the tail. */
+        tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
+    }
+}
 
 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
 {
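As a minimal illustration of the per-element operation that gen_gvec_sri() expands (the same mask/shift/and/or math as gen_shr_ins_vec in the hunk above, applied to a single 8-bit lane), here is a standalone sketch; sri8 is a made-up name for illustration, not a function in the tree.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* SRI on one 8-bit lane: keep the top sh bits of d, insert a >> sh
 * into the remaining bits. */
static uint8_t sri8(uint8_t d, uint8_t a, unsigned sh)
{
    assert(sh >= 1 && sh <= 8);         /* tszimm range [1..esize] */
    if (sh == 8) {
        return d;                       /* shift of esize leaves d unchanged */
    }
    uint8_t mask = (uint8_t)(0xffu << (8 - sh));
    return (uint8_t)((d & mask) | (a >> sh));
}

int main(void)
{
    /* d = 0xab, a = 0xff, shift 4: top nibble of d kept, 0x0f inserted. */
    printf("0x%02x\n", sri8(0xab, 0xff, 4));    /* prints 0xaf */
    return 0;
}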
@@ -4532,47 +4547,60 @@ static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
 {
     if (sh == 0) {
         tcg_gen_mov_vec(d, a);
     } else {
         TCGv_vec t = tcg_temp_new_vec_matching(d);
         TCGv_vec m = tcg_temp_new_vec_matching(d);
 
         tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
         tcg_gen_shli_vec(vece, t, a, sh);
         tcg_gen_and_vec(vece, d, d, m);
         tcg_gen_or_vec(vece, d, d, t);
 
         tcg_temp_free_vec(t);
         tcg_temp_free_vec(m);
     }
 }
 
-static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };
-
-const GVecGen2i sli_op[4] = {
-    { .fni8 = gen_shl8_ins_i64,
-      .fniv = gen_shl_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sli,
-      .vece = MO_8 },
-    { .fni8 = gen_shl16_ins_i64,
-      .fniv = gen_shl_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sli,
-      .vece = MO_16 },
-    { .fni4 = gen_shl32_ins_i32,
-      .fniv = gen_shl_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sli,
-      .vece = MO_32 },
-    { .fni8 = gen_shl64_ins_i64,
-      .fniv = gen_shl_ins_vec,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .load_dest = true,
-      .opt_opc = vecop_list_sli,
-      .vece = MO_64 },
-};
+void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                  int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
+    const GVecGen2i ops[4] = {
+        { .fni8 = gen_shl8_ins_i64,
+          .fniv = gen_shl_ins_vec,
+          .fno = gen_helper_gvec_sli_b,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_shl16_ins_i64,
+          .fniv = gen_shl_ins_vec,
+          .fno = gen_helper_gvec_sli_h,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_shl32_ins_i32,
+          .fniv = gen_shl_ins_vec,
+          .fno = gen_helper_gvec_sli_s,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = gen_shl64_ins_i64,
+          .fniv = gen_shl_ins_vec,
+          .fno = gen_helper_gvec_sli_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [0..esize-1]. */
+    tcg_debug_assert(shift >= 0);
+    tcg_debug_assert(shift < (8 << vece));
+
+    if (shift == 0) {
+        tcg_gen_gvec_mov(vece, rd_ofs, rm_ofs, opr_sz, max_sz);
+    } else {
+        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    }
+}
 
 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
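The commit message's point about out-of-line helpers can be illustrated with a plain loop: when the vector is too long for the inline TCG expansion (as with SVE), a helper simply applies the same insert operation to every lane of the operand in memory. The sketch below is not the vec_helper.c code added by this commit; the function name and the plain-array interface are invented for illustration, and it shows the SLI variant of the math from gen_shl_ins_vec above.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* SLI across a whole byte vector: keep the low sh bits of each d[i],
 * insert a[i] << sh into the remaining bits. */
static void sli_bytes(uint8_t *d, const uint8_t *a, size_t n, unsigned sh)
{
    uint8_t mask = (uint8_t)((1u << sh) - 1);    /* low sh bits of d survive */
    for (size_t i = 0; i < n; i++) {
        d[i] = (uint8_t)((d[i] & mask) | (uint8_t)(a[i] << sh));
    }
}

int main(void)
{
    uint8_t d[4] = { 0x01, 0x02, 0x03, 0x04 };
    uint8_t a[4] = { 0x0f, 0x0f, 0x0f, 0x0f };

    sli_bytes(d, a, 4, 4);
    for (int i = 0; i < 4; i++) {
        printf("0x%02x ", d[i]);                 /* 0xf1 0xf2 0xf3 0xf4 */
    }
    printf("\n");
    return 0;
}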
@@ -5715,20 +5743,14 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 }
                 /* Right shift comes here negative.  */
                 shift = -shift;
-                /* Shift out of range leaves destination unchanged.  */
-                if (shift < 8 << size) {
-                    tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
-                                    shift, &sri_op[size]);
-                }
+                gen_gvec_sri(size, rd_ofs, rm_ofs, shift,
+                             vec_size, vec_size);
                 return 0;
 
             case 5: /* VSHL, VSLI */
                 if (u) { /* VSLI */
-                    /* Shift out of range leaves destination unchanged.  */
-                    if (shift < 8 << size) {
-                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
-                                        vec_size, shift, &sli_op[size]);
-                    }
+                    gen_gvec_sli(size, rd_ofs, rm_ofs, shift,
+                                 vec_size, vec_size);
                 } else { /* VSHL */
                     /* Shifts larger than the element size are
                      * architecturally valid and results in zero.
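The call-site change above works because gen_gvec_sri() now owns the range handling that the removed `if (shift < 8 << size)` guard used to provide. A small standalone illustration of the convention, with made-up values rather than code from the tree:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    int size = 0;           /* MO_8: 8-bit elements */
    int shift = -8;         /* "Right shift comes here negative" */

    shift = -shift;         /* now in [1..esize], the range gen_gvec_sri asserts */
    assert(shift > 0 && shift <= (8 << size));

    /* shift == esize used to be skipped by the caller; gen_gvec_sri()
     * now handles it as a move of the register onto itself. */
    printf("dispatch shift=%d, esize=%d\n", shift, 8 << size);
    return 0;
}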