mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-07-27 12:23:53 -06:00
tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
Avoid the use of the OptContext slots. Find TempOptInfo once. When we fold to and, use fold_and. Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
81be07f905
commit
c7739ab83e
1 changed file with 17 additions and 18 deletions
|
@@ -1625,14 +1625,17 @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
|
|||
|
||||
static bool fold_deposit(OptContext *ctx, TCGOp *op)
|
||||
{
|
||||
TempOptInfo *t1 = arg_info(op->args[1]);
|
||||
TempOptInfo *t2 = arg_info(op->args[2]);
|
||||
int ofs = op->args[3];
|
||||
int len = op->args[4];
|
||||
TCGOpcode and_opc;
|
||||
uint64_t z_mask;
|
||||
|
||||
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
|
||||
uint64_t t1 = arg_info(op->args[1])->val;
|
||||
uint64_t t2 = arg_info(op->args[2])->val;
|
||||
|
||||
t1 = deposit64(t1, op->args[3], op->args[4], t2);
|
||||
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
|
||||
if (ti_is_const(t1) && ti_is_const(t2)) {
|
||||
return tcg_opt_gen_movi(ctx, op, op->args[0],
|
||||
deposit64(ti_const_val(t1), ofs, len,
|
||||
ti_const_val(t2)));
|
||||
}
|
||||
|
||||
switch (ctx->type) {
|
||||
|
@@ -1647,30 +1650,26 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
|
|||
}
|
||||
|
||||
/* Inserting a value into zero at offset 0. */
|
||||
if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
|
||||
uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
|
||||
if (ti_is_const_val(t1, 0) && ofs == 0) {
|
||||
uint64_t mask = MAKE_64BIT_MASK(0, len);
|
||||
|
||||
op->opc = and_opc;
|
||||
op->args[1] = op->args[2];
|
||||
op->args[2] = arg_new_constant(ctx, mask);
|
||||
ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
|
||||
return false;
|
||||
return fold_and(ctx, op);
|
||||
}
|
||||
|
||||
/* Inserting zero into a value. */
|
||||
if (arg_is_const_val(op->args[2], 0)) {
|
||||
uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
|
||||
if (ti_is_const_val(t2, 0)) {
|
||||
uint64_t mask = deposit64(-1, ofs, len, 0);
|
||||
|
||||
op->opc = and_opc;
|
||||
op->args[2] = arg_new_constant(ctx, mask);
|
||||
ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
|
||||
return false;
|
||||
return fold_and(ctx, op);
|
||||
}
|
||||
|
||||
ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
|
||||
op->args[3], op->args[4],
|
||||
arg_info(op->args[2])->z_mask);
|
||||
return false;
|
||||
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
|
||||
return fold_masks_z(ctx, op, z_mask);
|
||||
}
|
||||
|
||||
static bool fold_divide(OptContext *ctx, TCGOp *op)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue