Merge tag 'pull-tcg-20240205-2' of https://gitlab.com/rth7680/qemu into staging

tcg: Introduce TCG_COND_TST{EQ,NE}
target/alpha: Use TCG_COND_TST{EQ,NE}
target/m68k: Use TCG_COND_TST{EQ,NE} in gen_fcc_cond
target/sparc: Use TCG_COND_TSTEQ in gen_op_mulscc
target/s390x: Use TCG_COND_TSTNE for CC_OP_{TM,ICM}
target/s390x: Improve general case of disas_jcc

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmXBpTAdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/p6gf9HAasTSRECk2cvjW9
# /mcJy0AIaespnI50fG8fm48OoFl0847CdrsJycpZ1spw3W3Wb0cVbMbq/teNMjXZ
# 0SGQJFk9Baq7wMhW7VzhSzJ96pcorpQprp7XBMdheLXqpT4zsM/EuwEAepBk8RUG
# 3kCeo38dswXE681ZafZkd/8pPzII19sQK8eiMpceeYkBsbbep+DDcnE18Ee4kISS
# u0SbuslKVahxd86LKuzrcz0pNFcmFuR5jRP9hmbQ0MfeAn0Pxlndi+ayZNghfgPf
# 3hDjskiionFwxb/OoRj45BssTWfDiluWl7IUsHfegPXCQ2Y+woT5Vq6TVGZn0GqS
# c6RLQQ==
# =TMiE
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 06 Feb 2024 03:19:12 GMT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20240205-2' of https://gitlab.com/rth7680/qemu: (39 commits)
  tcg/tci: Support TCG_COND_TST{EQ,NE}
  tcg/s390x: Support TCG_COND_TST{EQ,NE}
  tcg/s390x: Add TCG_CT_CONST_CMP
  tcg/s390x: Split constraint A into J+U
  tcg/ppc: Support TCG_COND_TST{EQ,NE}
  tcg/ppc: Add TCG_CT_CONST_CMP
  tcg/ppc: Tidy up tcg_target_const_match
  tcg/ppc: Use cr0 in tcg_to_bc and tcg_to_isel
  tcg/ppc: Sink tcg_to_bc usage into tcg_out_bc
  tcg/sparc64: Support TCG_COND_TST{EQ,NE}
  tcg/sparc64: Pass TCGCond to tcg_out_cmp
  tcg/sparc64: Hoist read of tcg_cond_to_rcond
  tcg/i386: Use TEST r,r to test 8/16/32 bits
  tcg/i386: Improve TSTNE/TESTEQ vs powers of two
  tcg/i386: Support TCG_COND_TST{EQ,NE}
  tcg/i386: Move tcg_cond_to_jcc[] into tcg_out_cmp
  tcg/i386: Pass x86 condition codes to tcg_out_cmov
  tcg/arm: Support TCG_COND_TST{EQ,NE}
  tcg/arm: Split out tcg_out_cmp()
  tcg/aarch64: Generate CBNZ for TSTNE of UINT32_MAX
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2024-02-08 16:08:42 +00:00
commit 03e4bc0bc0
38 changed files with 1379 additions and 595 deletions
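
The common thread in the diffs below is the new pair of comparison kinds,
TCG_COND_TSTEQ and TCG_COND_TSTNE, which test whether the bitwise AND of
their two operands is zero or non-zero. That is what lets the front ends
drop an explicit tcg_gen_andi_* step before a brcond, movcond or setcond.
A minimal, self-contained sketch of the intended semantics (the tst_cond
helper is purely illustrative and not part of the patches):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: the new conditions compare (value & mask) against
 * zero rather than comparing value directly. */
static bool tst_cond(bool tstne, uint64_t value, uint64_t mask)
{
    return tstne ? (value & mask) != 0 : (value & mask) == 0;
}

int main(void)
{
    /* e.g. Alpha BLBS "branch on low bit set" becomes TSTNE with mask 1,
     * replacing the andi + compare-with-zero pair removed below. */
    assert(tst_cond(true, 0x5, 1));     /* low bit set   -> taken */
    assert(!tst_cond(true, 0x4, 1));    /* low bit clear -> not taken */
    return 0;
}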

target/alpha/translate.c

@@ -453,13 +453,13 @@ static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
}
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
TCGv cmp, int32_t disp)
TCGv cmp, uint64_t imm, int32_t disp)
{
uint64_t dest = ctx->base.pc_next + (disp << 2);
TCGLabel *lab_true = gen_new_label();
if (use_goto_tb(ctx, dest)) {
tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
tcg_gen_brcondi_i64(cond, cmp, imm, lab_true);
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
@@ -472,81 +472,71 @@ static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
return DISAS_NORETURN;
} else {
TCGv_i64 z = load_zero(ctx);
TCGv_i64 i = tcg_constant_i64(imm);
TCGv_i64 d = tcg_constant_i64(dest);
TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);
tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
tcg_gen_movcond_i64(cond, cpu_pc, cmp, i, d, p);
return DISAS_PC_UPDATED;
}
}
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
int32_t disp, int mask)
int32_t disp)
{
if (mask) {
TCGv tmp = tcg_temp_new();
DisasJumpType ret;
tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
ret = gen_bcond_internal(ctx, cond, tmp, disp);
return ret;
}
return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra),
is_tst_cond(cond), disp);
}
/* Fold -0.0 for comparison with COND. */
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src)
{
uint64_t mzero = 1ull << 63;
TCGv_i64 tmp;
switch (cond) {
*pimm = 0;
switch (*pcond) {
case TCG_COND_LE:
case TCG_COND_GT:
/* For <= or >, the -0.0 value directly compares the way we want. */
tcg_gen_mov_i64(dest, src);
break;
return src;
case TCG_COND_EQ:
case TCG_COND_NE:
/* For == or !=, we can simply mask off the sign bit and compare. */
tcg_gen_andi_i64(dest, src, mzero - 1);
break;
/* For == or !=, we can compare without the sign bit. */
*pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE;
*pimm = INT64_MAX;
return src;
case TCG_COND_GE:
case TCG_COND_LT:
/* For >= or <, map -0.0 to +0.0. */
tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero),
src, tcg_constant_i64(0));
break;
tmp = tcg_temp_new_i64();
tcg_gen_movcond_i64(TCG_COND_EQ, tmp,
src, tcg_constant_i64(INT64_MIN),
tcg_constant_i64(0), src);
return tmp;
default:
abort();
g_assert_not_reached();
}
}
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
int32_t disp)
{
TCGv cmp_tmp = tcg_temp_new();
DisasJumpType ret;
gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
return ret;
uint64_t imm;
TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
return gen_bcond_internal(ctx, cond, tmp, imm, disp);
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
TCGv_i64 va, vb, z;
z = load_zero(ctx);
vb = load_fpr(ctx, rb);
va = tcg_temp_new();
gen_fold_mzero(cond, va, load_fpr(ctx, ra));
tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
uint64_t imm;
TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc),
tmp, tcg_constant_i64(imm),
load_fpr(ctx, rb), load_fpr(ctx, rc));
}
#define QUAL_RM_N 0x080 /* Round mode nearest even */
@@ -1683,16 +1673,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x14:
/* CMOVLBS */
tmp = tcg_temp_new();
tcg_gen_andi_i64(tmp, va, 1);
tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1),
vb, load_gpr(ctx, rc));
break;
case 0x16:
/* CMOVLBC */
tmp = tcg_temp_new();
tcg_gen_andi_i64(tmp, va, 1);
tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1),
vb, load_gpr(ctx, rc));
break;
case 0x20:
@@ -2827,35 +2813,35 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x38:
/* BLBC */
ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21);
break;
case 0x39:
/* BEQ */
ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21);
break;
case 0x3A:
/* BLT */
ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21);
break;
case 0x3B:
/* BLE */
ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21);
break;
case 0x3C:
/* BLBS */
ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21);
break;
case 0x3D:
/* BNE */
ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21);
break;
case 0x3E:
/* BGE */
ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21);
break;
case 0x3F:
/* BGT */
ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21);
break;
invalid_opc:
ret = gen_invalid(ctx);

target/m68k/translate.c

@@ -5129,46 +5129,44 @@ undef:
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
TCGv fpsr;
int imm = 0;
c->v2 = tcg_constant_i32(0);
/* TODO: Raise BSUN exception. */
fpsr = tcg_temp_new();
gen_load_fcr(s, fpsr, M68K_FPSR);
c->v1 = fpsr;
switch (cond) {
case 0: /* False */
case 16: /* Signaling False */
c->v1 = c->v2;
c->tcond = TCG_COND_NEVER;
break;
case 1: /* EQual Z */
case 17: /* Signaling EQual Z */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
c->tcond = TCG_COND_NE;
imm = FPSR_CC_Z;
c->tcond = TCG_COND_TSTNE;
break;
case 2: /* Ordered Greater Than !(A || Z || N) */
case 18: /* Greater Than !(A || Z || N) */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr,
FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
c->tcond = TCG_COND_EQ;
imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
c->tcond = TCG_COND_TSTEQ;
break;
case 3: /* Ordered Greater than or Equal Z || !(A || N) */
case 19: /* Greater than or Equal Z || !(A || N) */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
tcg_gen_or_i32(c->v1, c->v1, fpsr);
tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
c->tcond = TCG_COND_NE;
imm = FPSR_CC_Z | FPSR_CC_N;
c->tcond = TCG_COND_TSTNE;
break;
case 4: /* Ordered Less Than !(!N || A || Z); */
case 20: /* Less Than !(!N || A || Z); */
c->v1 = tcg_temp_new();
tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
c->tcond = TCG_COND_EQ;
imm = FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z;
c->tcond = TCG_COND_TSTEQ;
break;
case 5: /* Ordered Less than or Equal Z || (N && !A) */
case 21: /* Less than or Equal Z || (N && !A) */
@@ -5176,49 +5174,45 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
tcg_gen_andc_i32(c->v1, fpsr, c->v1);
tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
c->tcond = TCG_COND_NE;
imm = FPSR_CC_Z | FPSR_CC_N;
c->tcond = TCG_COND_TSTNE;
break;
case 6: /* Ordered Greater or Less than !(A || Z) */
case 22: /* Greater or Less than !(A || Z) */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
c->tcond = TCG_COND_EQ;
imm = FPSR_CC_A | FPSR_CC_Z;
c->tcond = TCG_COND_TSTEQ;
break;
case 7: /* Ordered !A */
case 23: /* Greater, Less or Equal !A */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
c->tcond = TCG_COND_EQ;
imm = FPSR_CC_A;
c->tcond = TCG_COND_TSTEQ;
break;
case 8: /* Unordered A */
case 24: /* Not Greater, Less or Equal A */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
c->tcond = TCG_COND_NE;
imm = FPSR_CC_A;
c->tcond = TCG_COND_TSTNE;
break;
case 9: /* Unordered or Equal A || Z */
case 25: /* Not Greater or Less then A || Z */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
c->tcond = TCG_COND_NE;
imm = FPSR_CC_A | FPSR_CC_Z;
c->tcond = TCG_COND_TSTNE;
break;
case 10: /* Unordered or Greater Than A || !(N || Z)) */
case 26: /* Not Less or Equal A || !(N || Z)) */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
tcg_gen_or_i32(c->v1, c->v1, fpsr);
tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
c->tcond = TCG_COND_NE;
imm = FPSR_CC_A | FPSR_CC_N;
c->tcond = TCG_COND_TSTNE;
break;
case 11: /* Unordered or Greater or Equal A || Z || !N */
case 27: /* Not Less Than A || Z || !N */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
c->tcond = TCG_COND_NE;
tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
c->tcond = TCG_COND_TSTNE;
break;
case 12: /* Unordered or Less Than A || (N && !Z) */
case 28: /* Not Greater than or Equal A || (N && !Z) */
@@ -5226,27 +5220,25 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
tcg_gen_andc_i32(c->v1, fpsr, c->v1);
tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
c->tcond = TCG_COND_NE;
imm = FPSR_CC_A | FPSR_CC_N;
c->tcond = TCG_COND_TSTNE;
break;
case 13: /* Unordered or Less or Equal A || Z || N */
case 29: /* Not Greater Than A || Z || N */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
c->tcond = TCG_COND_NE;
imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
c->tcond = TCG_COND_TSTNE;
break;
case 14: /* Not Equal !Z */
case 30: /* Signaling Not Equal !Z */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
c->tcond = TCG_COND_EQ;
imm = FPSR_CC_Z;
c->tcond = TCG_COND_TSTEQ;
break;
case 15: /* True */
case 31: /* Signaling True */
c->v1 = c->v2;
c->tcond = TCG_COND_ALWAYS;
break;
}
c->v2 = tcg_constant_i32(imm);
}
static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)

target/s390x/tcg/translate.c

@@ -754,10 +754,10 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_TM_64:
switch (mask) {
case 8:
cond = TCG_COND_EQ;
cond = TCG_COND_TSTEQ;
break;
case 4 | 2 | 1:
cond = TCG_COND_NE;
cond = TCG_COND_TSTNE;
break;
default:
goto do_dynamic;
@@ -768,11 +768,11 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_ICM:
switch (mask) {
case 8:
cond = TCG_COND_EQ;
cond = TCG_COND_TSTEQ;
break;
case 4 | 2 | 1:
case 4 | 2:
cond = TCG_COND_NE;
cond = TCG_COND_TSTNE;
break;
default:
goto do_dynamic;
@@ -854,18 +854,14 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
c->u.s64.a = cc_dst;
c->u.s64.b = tcg_constant_i64(0);
break;
case CC_OP_LTGT_64:
case CC_OP_LTUGTU_64:
c->u.s64.a = cc_src;
c->u.s64.b = cc_dst;
break;
case CC_OP_TM_32:
case CC_OP_TM_64:
case CC_OP_ICM:
c->u.s64.a = tcg_temp_new_i64();
c->u.s64.b = tcg_constant_i64(0);
tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
c->u.s64.a = cc_src;
c->u.s64.b = cc_dst;
break;
case CC_OP_ADDU:
@@ -889,67 +885,45 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_STATIC:
c->is_64 = false;
c->u.s32.a = cc_op;
switch (mask) {
case 0x8 | 0x4 | 0x2: /* cc != 3 */
cond = TCG_COND_NE;
/* Fold half of the cases using bit 3 to invert. */
switch (mask & 8 ? mask ^ 0xf : mask) {
case 0x1: /* cc == 3 */
cond = TCG_COND_EQ;
c->u.s32.b = tcg_constant_i32(3);
break;
case 0x8 | 0x4 | 0x1: /* cc != 2 */
cond = TCG_COND_NE;
c->u.s32.b = tcg_constant_i32(2);
break;
case 0x8 | 0x2 | 0x1: /* cc != 1 */
cond = TCG_COND_NE;
c->u.s32.b = tcg_constant_i32(1);
break;
case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
cond = TCG_COND_EQ;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
break;
case 0x8 | 0x4: /* cc < 2 */
cond = TCG_COND_LTU;
c->u.s32.b = tcg_constant_i32(2);
break;
case 0x8: /* cc == 0 */
cond = TCG_COND_EQ;
c->u.s32.b = tcg_constant_i32(0);
break;
case 0x4 | 0x2 | 0x1: /* cc != 0 */
cond = TCG_COND_NE;
c->u.s32.b = tcg_constant_i32(0);
break;
case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
cond = TCG_COND_NE;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
break;
case 0x4: /* cc == 1 */
cond = TCG_COND_EQ;
c->u.s32.b = tcg_constant_i32(1);
break;
case 0x2 | 0x1: /* cc > 1 */
cond = TCG_COND_GTU;
c->u.s32.b = tcg_constant_i32(1);
break;
case 0x2: /* cc == 2 */
cond = TCG_COND_EQ;
c->u.s32.b = tcg_constant_i32(2);
break;
case 0x1: /* cc == 3 */
case 0x4: /* cc == 1 */
cond = TCG_COND_EQ;
c->u.s32.b = tcg_constant_i32(3);
c->u.s32.b = tcg_constant_i32(1);
break;
case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
cond = TCG_COND_GTU;
c->u.s32.b = tcg_constant_i32(1);
break;
case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
cond = TCG_COND_TSTNE;
c->u.s32.b = tcg_constant_i32(1);
break;
case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
cond = TCG_COND_LEU;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(1);
tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
break;
case 0x4 | 0x2 | 0x1: /* cc != 0 */
cond = TCG_COND_NE;
c->u.s32.b = tcg_constant_i32(0);
break;
default:
/* CC is masked by something else: (8 >> cc) & mask. */
cond = TCG_COND_NE;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
break;
/* case 0: never, handled above. */
g_assert_not_reached();
}
if (mask & 8) {
cond = tcg_invert_cond(cond);
}
break;
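
As a side note on the CC_OP_STATIC rework above: the 4-bit branch mask
selects which condition-code values 0..3 take the branch, one bit per cc
(bit 8 >> cc, as the dynamic default case in the diff also spells out),
and the "fold half of the cases using bit 3 to invert" step relies on the
mask and its complement selecting exactly opposite cc sets. A small sketch
(cc_selected is an illustrative helper, not code from the patch):

#include <assert.h>
#include <stdbool.h>

/* Sketch of the s390x branch-mask convention: mask bit (8 >> cc) means
 * "branch when the condition code equals cc". */
static bool cc_selected(unsigned mask, unsigned cc)
{
    return (mask & (8 >> cc)) != 0;
}

int main(void)
{
    /* Complementing the mask selects exactly the opposite cc values, so a
     * mask with bit 8 set can reuse the comparison built for mask ^ 0xf
     * followed by tcg_invert_cond(), halving the switch cases. */
    for (unsigned mask = 0; mask < 16; mask++) {
        for (unsigned cc = 0; cc < 4; cc++) {
            assert(cc_selected(mask, cc) == !cc_selected(mask ^ 0xf, cc));
        }
    }
    return 0;
}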

target/sparc/translate.c

@@ -488,6 +488,7 @@ static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
TCGv zero = tcg_constant_tl(0);
TCGv one = tcg_constant_tl(1);
TCGv t_src1 = tcg_temp_new();
TCGv t_src2 = tcg_temp_new();
TCGv t0 = tcg_temp_new();
@@ -499,8 +500,7 @@ static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
* if (!(env->y & 1))
* src2 = 0;
*/
tcg_gen_andi_tl(t0, cpu_y, 0x1);
tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);
tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);
/*
* b2 = src1 & 1;