Mirror of https://github.com/Motorhead1991/qemu.git; last synced 2025-08-04 08:13:54 -06:00.
tcg/optimize: Use fold_masks_zs in fold_xor
Avoid the use of the OptContext slots. Find TempOptInfo once. Remove fold_masks, as the function becomes unused.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
0fb5b757c3
commit
c890fd7179
1 changed file with 8 additions and 10 deletions
|
@ -1077,11 +1077,6 @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
|
|||
return fold_masks_zs(ctx, op, -1, s_mask);
|
||||
}
|
||||
|
||||
/*
 * Apply the known-zero and sign masks accumulated in the OptContext
 * to the current op by delegating to fold_masks_zs.
 * NOTE(review): the commit this page shows removes this function as
 * unused; it is reproduced here verbatim in behavior.
 */
static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    uint64_t known_zeros = ctx->z_mask;
    uint64_t known_signs = ctx->s_mask;

    return fold_masks_zs(ctx, op, known_zeros, known_signs);
}
|
||||
|
||||
/*
|
||||
* An "affected" mask bit is 0 if and only if the result is identical
|
||||
* to the first input. Thus if the entire mask is 0, the operation
|
||||
|
@ -2769,6 +2764,9 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
|
|||
|
||||
/*
 * Optimize a two-operand XOR op.
 *
 * Defect fixed: the scraped diff span contained BOTH the pre-change body
 * (writing ctx->z_mask / ctx->s_mask and calling fold_masks) AND the
 * post-change body, leaving unreachable duplicate statements after the
 * first return.  Resolved to the post-commit version: avoid the
 * OptContext slots, look up each TempOptInfo once, and pass the computed
 * masks directly to fold_masks_zs.
 *
 * Returns true if the op was folded away (or fully handled), false if
 * the op remains and only its mask info was updated.
 */
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask;
    TempOptInfo *t1, *t2;

    /* Constant fold, then algebraic identities: x ^ x -> 0, x ^ 0 -> x. */
    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        /*
         * NOTE(review): this term lies in the elided gap between the two
         * visible hunks; restored from upstream QEMU commit c890fd7179
         * (x ^ -1 -> not x) -- confirm against the original file.
         */
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);

    /* A result bit may be nonzero if either input bit may be nonzero. */
    z_mask = t1->z_mask | t2->z_mask;
    /* Sign-extension bits survive only where both inputs agree. */
    s_mask = t1->s_mask & t2->s_mask;

    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
|
||||
|
||||
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue