target/arm: Introduce gen_gvec_{s,u}{add,ada}lp

Pairwise addition with and without accumulation.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241211163036.2297116-46-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Richard Henderson 2024-12-11 10:30:12 -06:00 committed by Peter Maydell
parent e90cf92209
commit c14bde6998
5 changed files with 243 additions and 170 deletions

View file

@@ -397,8 +397,6 @@ DEF_HELPER_1(neon_widen_s16, i64, i32)
DEF_HELPER_2(neon_addl_u16, i64, i64, i64)
DEF_HELPER_2(neon_addl_u32, i64, i64, i64)
DEF_HELPER_2(neon_paddl_u16, i64, i64, i64)
DEF_HELPER_2(neon_paddl_u32, i64, i64, i64)
DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_2(neon_subl_u16, i64, i64, i64)

View file

@@ -2467,3 +2467,233 @@ void gen_gvec_rev64(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
g_assert_not_reached();
}
}
static void gen_saddlp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    /* Signed pairwise widening add: each pair of half-width elements
     * in n is sign-extended and summed into one full-width element.
     */
    int shift = 4 << vece;            /* half the element width in bits */
    TCGv_vec lo = tcg_temp_new_vec_matching(d);

    /* Sign-extend the low halves: shift up, then arithmetic shift down.
     * Read n into the temp before writing d, in case d aliases n.
     */
    tcg_gen_shli_vec(vece, lo, n, shift);
    tcg_gen_sari_vec(vece, lo, lo, shift);
    /* The high halves sign-extend into place with one arithmetic shift. */
    tcg_gen_sari_vec(vece, d, n, shift);
    tcg_gen_add_vec(vece, d, d, lo);
}
static void gen_saddlp_s_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 2x32 -> 1x64 signed pairwise add within a 64-bit value. */
    TCGv_i64 lo = tcg_temp_new_i64();

    tcg_gen_ext32s_i64(lo, n);        /* sign-extend the low element */
    tcg_gen_sari_i64(d, n, 32);       /* sign-extend the high element */
    tcg_gen_add_i64(d, d, lo);
}
void gen_gvec_saddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz)
{
    /*
     * Expand SADDLP / VPADDL.S: signed pairwise widening add.
     * Note vece names the *input* element size; each table entry's
     * .vece is the widened result element size.
     */
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sari_vec, INDEX_op_shli_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2 ops[3] = {
        { .fniv = gen_saddlp_vec,
          .fni8 = gen_helper_neon_addlp_s8,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fniv = gen_saddlp_vec,
          .fni8 = gen_helper_neon_addlp_s16,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fniv = gen_saddlp_vec,
          .fni8 = gen_saddlp_s_i64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    assert(vece <= MO_32);
    tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &ops[vece]);
}
static void gen_sadalp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    /* Signed pairwise add-accumulate: SADDLP of n, added into d. */
    TCGv_vec sum = tcg_temp_new_vec_matching(d);

    gen_saddlp_vec(vece, sum, n);
    tcg_gen_add_vec(vece, d, d, sum);
}
static void gen_sadalp_b_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 8x8 -> 4x16 signed pairwise add, accumulated per 16-bit lane. */
    TCGv_i64 sum = tcg_temp_new_i64();

    gen_helper_neon_addlp_s8(sum, n);
    tcg_gen_vec_add16_i64(d, d, sum);
}
static void gen_sadalp_h_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 4x16 -> 2x32 signed pairwise add, accumulated per 32-bit lane. */
    TCGv_i64 sum = tcg_temp_new_i64();

    gen_helper_neon_addlp_s16(sum, n);
    tcg_gen_vec_add32_i64(d, d, sum);
}
static void gen_sadalp_s_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 2x32 -> 1x64 signed pairwise add, accumulated into d. */
    TCGv_i64 sum = tcg_temp_new_i64();

    gen_saddlp_s_i64(sum, n);
    tcg_gen_add_i64(d, d, sum);
}
void gen_gvec_sadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz)
{
    /*
     * Expand SADALP / VPADAL.S: signed pairwise widening add,
     * accumulating into the destination (hence .load_dest).
     */
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sari_vec, INDEX_op_shli_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2 ops[3] = {
        { .fniv = gen_sadalp_vec,
          .fni8 = gen_sadalp_b_i64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_16 },
        { .fniv = gen_sadalp_vec,
          .fni8 = gen_sadalp_h_i64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_32 },
        { .fniv = gen_sadalp_vec,
          .fni8 = gen_sadalp_s_i64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };

    assert(vece <= MO_32);
    tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &ops[vece]);
}
static void gen_uaddlp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    /* Unsigned pairwise widening add: isolate the low halves with a
     * mask and the high halves with a logical shift, then sum.
     */
    int shift = 4 << vece;            /* half the element width in bits */
    TCGv_vec hi = tcg_temp_new_vec_matching(d);
    TCGv_vec lo_mask = tcg_constant_vec_matching(d, vece,
                                                 MAKE_64BIT_MASK(0, shift));

    /* Read n into the temp before writing d, in case d aliases n. */
    tcg_gen_shri_vec(vece, hi, n, shift);
    tcg_gen_and_vec(vece, d, n, lo_mask);
    tcg_gen_add_vec(vece, d, d, hi);
}
static void gen_uaddlp_b_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 8x8 -> 4x16 unsigned pairwise add within a 64-bit value. */
    TCGv_i64 odd = tcg_temp_new_i64();
    TCGv_i64 byte_mask = tcg_constant_i64(dup_const(MO_16, 0xff));

    tcg_gen_shri_i64(odd, n, 8);
    tcg_gen_and_i64(d, n, byte_mask);
    tcg_gen_and_i64(odd, odd, byte_mask);
    /* Each 16-bit lane holds two zero-extended bytes:
     * no carry can cross between widened unsigned elements.
     */
    tcg_gen_add_i64(d, d, odd);
}
static void gen_uaddlp_h_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 4x16 -> 2x32 unsigned pairwise add within a 64-bit value. */
    TCGv_i64 odd = tcg_temp_new_i64();
    TCGv_i64 half_mask = tcg_constant_i64(dup_const(MO_32, 0xffff));

    tcg_gen_shri_i64(odd, n, 16);
    tcg_gen_and_i64(d, n, half_mask);
    tcg_gen_and_i64(odd, odd, half_mask);
    /* Each 32-bit lane holds two zero-extended halfwords:
     * no carry can cross between widened unsigned elements.
     */
    tcg_gen_add_i64(d, d, odd);
}
static void gen_uaddlp_s_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 2x32 -> 1x64 unsigned pairwise add. */
    TCGv_i64 lo = tcg_temp_new_i64();

    tcg_gen_ext32u_i64(lo, n);        /* zero-extend the low element */
    tcg_gen_shri_i64(d, n, 32);       /* zero-extend the high element */
    tcg_gen_add_i64(d, d, lo);
}
void gen_gvec_uaddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz)
{
    /*
     * Expand UADDLP / VPADDL.U: unsigned pairwise widening add.
     * Note vece names the *input* element size; each table entry's
     * .vece is the widened result element size.
     */
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2 ops[3] = {
        { .fniv = gen_uaddlp_vec,
          .fni8 = gen_uaddlp_b_i64,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fniv = gen_uaddlp_vec,
          .fni8 = gen_uaddlp_h_i64,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fniv = gen_uaddlp_vec,
          .fni8 = gen_uaddlp_s_i64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    assert(vece <= MO_32);
    tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &ops[vece]);
}
static void gen_uadalp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    /* Unsigned pairwise add-accumulate: UADDLP of n, added into d. */
    TCGv_vec sum = tcg_temp_new_vec_matching(d);

    gen_uaddlp_vec(vece, sum, n);
    tcg_gen_add_vec(vece, d, d, sum);
}
static void gen_uadalp_b_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 8x8 -> 4x16 unsigned pairwise add, accumulated per 16-bit lane. */
    TCGv_i64 sum = tcg_temp_new_i64();

    gen_uaddlp_b_i64(sum, n);
    tcg_gen_vec_add16_i64(d, d, sum);
}
static void gen_uadalp_h_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 4x16 -> 2x32 unsigned pairwise add, accumulated per 32-bit lane. */
    TCGv_i64 sum = tcg_temp_new_i64();

    gen_uaddlp_h_i64(sum, n);
    tcg_gen_vec_add32_i64(d, d, sum);
}
static void gen_uadalp_s_i64(TCGv_i64 d, TCGv_i64 n)
{
    /* 2x32 -> 1x64 unsigned pairwise add, accumulated into d. */
    TCGv_i64 sum = tcg_temp_new_i64();

    gen_uaddlp_s_i64(sum, n);
    tcg_gen_add_i64(d, d, sum);
}
void gen_gvec_uadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz)
{
    /*
     * Expand UADALP / VPADAL.U: unsigned pairwise widening add,
     * accumulating into the destination (hence .load_dest).
     */
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2 ops[3] = {
        { .fniv = gen_uadalp_vec,
          .fni8 = gen_uadalp_b_i64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_16 },
        { .fniv = gen_uadalp_vec,
          .fni8 = gen_uadalp_h_i64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_32 },
        { .fniv = gen_uadalp_vec,
          .fni8 = gen_uadalp_s_i64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };

    assert(vece <= MO_32);
    tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &ops[vece]);
}

View file

@@ -844,28 +844,6 @@ uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
return (a + b) ^ mask;
}
uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
{
    /*
     * Pairwise add of adjacent 16-bit lanes, each result truncated
     * back to 16 bits: a supplies the two low result lanes, b the
     * two high result lanes.
     */
    uint16_t r0 = (uint16_t)a + (uint16_t)(a >> 16);
    uint16_t r1 = (uint16_t)(a >> 32) + (uint16_t)(a >> 48);
    uint16_t r2 = (uint16_t)b + (uint16_t)(b >> 16);
    uint16_t r3 = (uint16_t)(b >> 32) + (uint16_t)(b >> 48);

    return (uint64_t)r0
         | ((uint64_t)r1 << 16)
         | ((uint64_t)r2 << 32)
         | ((uint64_t)r3 << 48);
}
uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
{
    /*
     * Pairwise add of adjacent 32-bit lanes, each result truncated
     * back to 32 bits: a supplies the low result lane, b the high.
     */
    uint32_t lo = (uint32_t)a + (uint32_t)(a >> 32);
    uint32_t hi = (uint32_t)b + (uint32_t)(b >> 32);

    return lo | ((uint64_t)hi << 32);
}
/* Pairwise long add: add pairs of adjacent elements into
* double-width elements in the result (eg _s8 is an 8x8->16 op)
*/

View file

@@ -2565,152 +2565,6 @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
return true;
}
/*
 * Common expander for the Neon pairwise-long 2-misc operations
 * (VPADDL, VPADAL).  Returns false to UNDEF, true otherwise.
 */
static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
NeonGenWidenFn *widenfn,
NeonGenTwo64OpFn *opfn,
NeonGenTwo64OpFn *accfn)
{
/*
 * Pairwise long operations: widen both halves of the pair,
 * combine the pairs with the opfn, and then possibly accumulate
 * into the destination with the accfn.
 */
int pass;
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
return false;
}
/* UNDEF accesses to D16-D31 if they don't exist. */
if (!dc_isar_feature(aa32_simd_r32, s) &&
((a->vd | a->vm) & 0x10)) {
return false;
}
/* Q-form insns require even register numbers. */
if ((a->vd | a->vm) & a->q) {
return false;
}
/* A NULL widenfn marks a size the caller does not support. */
if (!widenfn) {
return false;
}
if (!vfp_access_check(s)) {
return true;
}
/* One 64-bit pass for D-form, two for Q-form. */
for (pass = 0; pass < a->q + 1; pass++) {
TCGv_i32 tmp;
TCGv_i64 rm0_64, rm1_64, rd_64;
rm0_64 = tcg_temp_new_i64();
rm1_64 = tcg_temp_new_i64();
rd_64 = tcg_temp_new_i64();
/* tmp is reused for both 32-bit halves of the Vm input. */
tmp = tcg_temp_new_i32();
read_neon_element32(tmp, a->vm, pass * 2, MO_32);
widenfn(rm0_64, tmp);
read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32);
widenfn(rm1_64, tmp);
opfn(rd_64, rm0_64, rm1_64);
/* For VPADAL, fold the old destination value into the result. */
if (accfn) {
TCGv_i64 tmp64 = tcg_temp_new_i64();
read_neon_element64(tmp64, a->vd, pass, MO_64);
accfn(rd_64, tmp64, rd_64);
}
write_neon_element64(rd_64, a->vd, pass, MO_64);
}
return true;
}
static bool trans_VPADDL_S(DisasContext *s, arg_2misc *a)
{
    /* VPADDL.S: signed pairwise long add, no accumulation. */
    static NeonGenWidenFn * const widen[4] = {
        gen_helper_neon_widen_s8,
        gen_helper_neon_widen_s16,
        tcg_gen_ext_i32_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const pairwise_add[4] = {
        gen_helper_neon_paddl_u16,
        gen_helper_neon_paddl_u32,
        tcg_gen_add_i64,
        NULL,
    };

    return do_2misc_pairwise(s, a, widen[a->size], pairwise_add[a->size],
                             NULL);
}
static bool trans_VPADDL_U(DisasContext *s, arg_2misc *a)
{
    /* VPADDL.U: unsigned pairwise long add, no accumulation. */
    static NeonGenWidenFn * const widen[4] = {
        gen_helper_neon_widen_u8,
        gen_helper_neon_widen_u16,
        tcg_gen_extu_i32_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const pairwise_add[4] = {
        gen_helper_neon_paddl_u16,
        gen_helper_neon_paddl_u32,
        tcg_gen_add_i64,
        NULL,
    };

    return do_2misc_pairwise(s, a, widen[a->size], pairwise_add[a->size],
                             NULL);
}
static bool trans_VPADAL_S(DisasContext *s, arg_2misc *a)
{
    /* VPADAL.S: signed pairwise long add, accumulating into Vd. */
    static NeonGenWidenFn * const widen[4] = {
        gen_helper_neon_widen_s8,
        gen_helper_neon_widen_s16,
        tcg_gen_ext_i32_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const pairwise_add[4] = {
        gen_helper_neon_paddl_u16,
        gen_helper_neon_paddl_u32,
        tcg_gen_add_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const accumulate[4] = {
        gen_helper_neon_addl_u16,
        gen_helper_neon_addl_u32,
        tcg_gen_add_i64,
        NULL,
    };

    return do_2misc_pairwise(s, a, widen[a->size], pairwise_add[a->size],
                             accumulate[a->size]);
}
static bool trans_VPADAL_U(DisasContext *s, arg_2misc *a)
{
    /* VPADAL.U: unsigned pairwise long add, accumulating into Vd. */
    static NeonGenWidenFn * const widen[4] = {
        gen_helper_neon_widen_u8,
        gen_helper_neon_widen_u16,
        tcg_gen_extu_i32_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const pairwise_add[4] = {
        gen_helper_neon_paddl_u16,
        gen_helper_neon_paddl_u32,
        tcg_gen_add_i64,
        NULL,
    };
    static NeonGenTwo64OpFn * const accumulate[4] = {
        gen_helper_neon_addl_u16,
        gen_helper_neon_addl_u32,
        tcg_gen_add_i64,
        NULL,
    };

    return do_2misc_pairwise(s, a, widen[a->size], pairwise_add[a->size],
                             accumulate[a->size]);
}
typedef void ZipFn(TCGv_ptr, TCGv_ptr);
static bool do_zip_uzp(DisasContext *s, arg_2misc *a,
@@ -3071,6 +2925,10 @@ DO_2MISC_VEC(VCLT0, gen_gvec_clt0)
DO_2MISC_VEC(VCLS, gen_gvec_cls)
DO_2MISC_VEC(VCLZ, gen_gvec_clz)
DO_2MISC_VEC(VREV64, gen_gvec_rev64)
DO_2MISC_VEC(VPADDL_S, gen_gvec_saddlp)
DO_2MISC_VEC(VPADDL_U, gen_gvec_uaddlp)
DO_2MISC_VEC(VPADAL_S, gen_gvec_sadalp)
DO_2MISC_VEC(VPADAL_U, gen_gvec_uadalp)
static bool trans_VMVN(DisasContext *s, arg_2misc *a)
{

View file

@@ -593,6 +593,15 @@ void gen_gvec_rev32(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
void gen_gvec_rev64(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_saddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uaddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t opr_sz, uint32_t max_sz);
/*
* Forward to the isar_feature_* tests given a DisasContext pointer.
*/