diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 7da67086c6..c8d1e765d9 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -2610,21 +2610,24 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
     case INDEX_op_shl_i32:
         if (const_args[2]) {
-            tcg_out_shli32(s, args[0], args[1], args[2]);
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
         } else {
             tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_shr_i32:
         if (const_args[2]) {
-            tcg_out_shri32(s, args[0], args[1], args[2]);
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
         } else {
             tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_sar_i32:
         if (const_args[2]) {
-            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
         } else {
             tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
         }
         break;
@@ -2696,14 +2699,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
     case INDEX_op_shl_i64:
         if (const_args[2]) {
-            tcg_out_shli64(s, args[0], args[1], args[2]);
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
         } else {
             tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_shr_i64:
         if (const_args[2]) {
-            tcg_out_shri64(s, args[0], args[1], args[2]);
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
         } else {
             tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
         }
         break;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index e60b74fb82..4b8a473fad 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -3189,8 +3189,9 @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
 
     memop = tcg_canonicalize_memop(memop, 0, 0);
 
-    tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
-    gen(t2, t1, val);
+    tcg_gen_qemu_ld_i32(t1, addr, idx, memop);
+    tcg_gen_ext_i32(t2, val, memop);
+    gen(t2, t1, t2);
     tcg_gen_qemu_st_i32(t2, addr, idx, memop);
 
     tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
@@ -3232,8 +3233,9 @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
 
     memop = tcg_canonicalize_memop(memop, 1, 0);
 
-    tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
-    gen(t2, t1, val);
+    tcg_gen_qemu_ld_i64(t1, addr, idx, memop);
+    tcg_gen_ext_i64(t2, val, memop);
+    gen(t2, t1, t2);
     tcg_gen_qemu_st_i64(t2, addr, idx, memop);
 
     tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
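
For illustration, here is a minimal standalone C sketch of what the `& 31` / `& 63` masking in the tcg/ppc hunks protects against. The opcode template and field offset below are simplified stand-ins, not QEMU's real PPC encoding macros: the point is only that a shift count is encoded in a fixed-width field (5 bits for 32-bit shifts), so an unmasked immediate of 32 or more sets bits outside the field and silently turns the word into a different, possibly illegal, instruction.

/*
 * Sketch only: why an immediate shift count must be masked before
 * being OR'd into a fixed-width instruction field.  SH_POS and the
 * SRAWI_TMPL value are hypothetical stand-ins for QEMU's macros.
 */
#include <stdint.h>
#include <stdio.h>

#define SH_POS     11
#define SH(n)      ((uint32_t)(n) << SH_POS)   /* 5-bit field, no masking */
#define SRAWI_TMPL 0x7C000670u                 /* pretend opcode template */

int main(void)
{
    /* An in-range count stays inside the 5-bit field. */
    uint32_t good = SRAWI_TMPL | SH(31);

    /* A count of 32 sets a bit above the field, corrupting the
     * surrounding opcode bits: no longer the intended instruction. */
    uint32_t bad = SRAWI_TMPL | SH(32);

    /* Masked, the count wraps to 0 but the encoding stays legal.
     * TCG leaves out-of-range shift counts unspecified, so any
     * in-range result is acceptable; only the encoding must be valid. */
    uint32_t fixed = SRAWI_TMPL | SH(32 & 31);

    printf("good  = %08x\nbad   = %08x\nfixed = %08x\n", good, bad, fixed);
    return 0;
}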
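
The tcg-op.c hunks fix the non-atomic fallback for sub-word atomic operations: the old code loaded with MO_SIGN stripped and fed val to the op unextended, so sign-sensitive ops (smax/smin and friends) could compare inconsistently extended operands. A sketch of the failure mode, modelling an 8-bit signed fetch-and-smax carried out in 32-bit arithmetic (plain C, not TCG code):

/*
 * Sketch only: why both operands of the fallback op must be extended
 * the same way per the memop.  Models MO_SB fetch-and-smax on a byte
 * holding -1 (0xFF) with an incoming value of +1.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t mem = 0xFF;   /* -1 as a signed byte */
    int32_t val = 1;

    /* Old scheme: zero-extended load, unextended operand. */
    int32_t t1 = mem;                     /* 255: the sign is lost */
    int32_t t2 = t1 > val ? t1 : val;     /* smax picks 255: wrong */

    /* Fixed scheme: both operands extended per the (signed) memop. */
    int32_t u1 = (int8_t)mem;             /* -1 */
    int32_t u2 = u1 > val ? u1 : val;     /* smax picks 1: right */

    printf("old stores 0x%02x, fixed stores 0x%02x\n",
           (unsigned)(uint8_t)t2, (unsigned)(uint8_t)u2);
    return 0;
}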