tcg/i386: Split out tcg_out_vex_modrm_type
Helper function to handle setting of VEXL based on the type of the operation.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 8dd2ea7515
commit bc97b3ad31
1 changed file with 15 additions and 23 deletions
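The new helper centralizes the VEX.L selection that was previously open-coded at each vector call site: P_VEXL requests the 256-bit (YMM) form of the encoding, which is wanted exactly when the operation's TCG type is TCG_TYPE_V256. A minimal sketch of the call-site change, reusing the operand names that appear in the hunks below (the surrounding emitter context is omitted for illustration):

    /* Before: each 256-bit-capable call site selected VEX.L by hand. */
    if (type == TCG_TYPE_V256) {
        insn |= P_VEXL;                 /* request the 256-bit (YMM) form */
    }
    tcg_out_vex_modrm(s, insn, a0, a1, a2);

    /* After: the helper derives VEX.L from the TCGType itself. */
    tcg_out_vex_modrm_type(s, insn, a0, a1, a2, type);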
@@ -711,6 +711,15 @@ static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
     tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
 }
 
+static void tcg_out_vex_modrm_type(TCGContext *s, int opc,
+                                   int r, int v, int rm, TCGType type)
+{
+    if (type == TCG_TYPE_V256) {
+        opc |= P_VEXL;
+    }
+    tcg_out_vex_modrm(s, opc, r, v, rm);
+}
+
 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
    We handle either RM and INDEX missing with a negative value.  In 64-bit
    mode for absolute addresses, ~RM is the size of the immediate operand
@@ -904,8 +913,7 @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg r, TCGReg a)
 {
     if (have_avx2) {
-        int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
-        tcg_out_vex_modrm(s, avx2_dup_insn[vece] + vex_l, r, 0, a);
+        tcg_out_vex_modrm_type(s, avx2_dup_insn[vece], r, 0, a, type);
     } else {
         switch (vece) {
         case MO_8:
@@ -3231,10 +3239,7 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         goto gen_simd;
     gen_simd:
         tcg_debug_assert(insn != OPC_UD2);
-        if (type == TCG_TYPE_V256) {
-            insn |= P_VEXL;
-        }
-        tcg_out_vex_modrm(s, insn, a0, a1, a2);
+        tcg_out_vex_modrm_type(s, insn, a0, a1, a2, type);
         break;
 
     case INDEX_op_cmp_vec:
@@ -3250,10 +3255,7 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_andc_vec:
         insn = OPC_PANDN;
-        if (type == TCG_TYPE_V256) {
-            insn |= P_VEXL;
-        }
-        tcg_out_vex_modrm(s, insn, a0, a2, a1);
+        tcg_out_vex_modrm_type(s, insn, a0, a2, a1, type);
         break;
 
     case INDEX_op_shli_vec:
@@ -3281,10 +3283,7 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         goto gen_shift;
     gen_shift:
         tcg_debug_assert(vece != MO_8);
-        if (type == TCG_TYPE_V256) {
-            insn |= P_VEXL;
-        }
-        tcg_out_vex_modrm(s, insn, sub, a0, a1);
+        tcg_out_vex_modrm_type(s, insn, sub, a0, a1, type);
         tcg_out8(s, a2);
         break;
 
@@ -3361,19 +3360,12 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
 
     gen_simd_imm8:
         tcg_debug_assert(insn != OPC_UD2);
-        if (type == TCG_TYPE_V256) {
-            insn |= P_VEXL;
-        }
-        tcg_out_vex_modrm(s, insn, a0, a1, a2);
+        tcg_out_vex_modrm_type(s, insn, a0, a1, a2, type);
         tcg_out8(s, sub);
         break;
 
     case INDEX_op_x86_vpblendvb_vec:
-        insn = OPC_VPBLENDVB;
-        if (type == TCG_TYPE_V256) {
-            insn |= P_VEXL;
-        }
-        tcg_out_vex_modrm(s, insn, a0, a1, a2);
+        tcg_out_vex_modrm_type(s, OPC_VPBLENDVB, a0, a1, a2, type);
         tcg_out8(s, args[3] << 4);
         break;
 