target/riscv: Support Zama16b extension
Zama16b is the property that misaligned loads, stores and atomics within a naturally aligned 16-byte region are atomic.

According to the specification, Zama16b applies only to AMOs, loads and stores defined in the base ISAs, and loads and stores of no more than XLEN bits defined in the F, D, and Q extensions. Thus it should not apply to Zacas or to RVC instructions. For an instruction in that set, if all accessed bytes lie within the same naturally aligned 16-byte granule, the instruction will not raise an exception for reasons of address alignment, and it will give rise to only one memory operation for the purposes of RVWMO, i.e. it will execute atomically.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20240709113652.1239-6-zhiwei_liu@linux.alibaba.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
parent 67e98ebad0
commit a60ce58fd9

6 changed files with 57 additions and 22 deletions
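As background for the diff below, the Zama16b guarantee hinges on a single address check: an access is covered when every byte it touches falls inside one naturally aligned 16-byte block. A minimal sketch of that predicate, assuming nothing beyond the commit message (the helper name within_16b_granule is hypothetical and not part of this commit):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Hypothetical helper, not from this commit: true when every byte of a
     * `size`-byte access at `addr` lies in the same naturally aligned
     * 16-byte block, i.e. when Zama16b promises the access is atomic and
     * raises no misalignment exception.
     */
    static bool within_16b_granule(uint64_t addr, unsigned size)
    {
        return (addr & ~(uint64_t)15) == ((addr + size - 1) & ~(uint64_t)15);
    }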
target/riscv/cpu.c

@@ -118,6 +118,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
+    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
@@ -1476,6 +1477,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
     MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
+    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
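The MULTI_EXT_CFG_BOOL entry above exposes zama16b as a user-settable CPU property, defaulting to off. Presumably it can then be toggled like the neighbouring extensions, e.g. with something along the lines of `-cpu rv64,zama16b=true` on the QEMU command line.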
target/riscv/cpu_cfg.h

@@ -83,6 +83,7 @@ struct RISCVCPUConfig {
     bool ext_zdinx;
     bool ext_zaamo;
     bool ext_zacas;
+    bool ext_zama16b;
     bool ext_zalrsc;
     bool ext_zawrs;
     bool ext_zfa;
target/riscv/insn_trans/trans_rva.c.inc

@@ -103,6 +103,12 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
     TCGv dest = dest_gpr(ctx, a->rd);
     TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+    if (ctx->cfg_ptr->ext_zama16b) {
+        mop |= MO_ATOM_WITHIN16;
+    } else {
+        mop |= MO_ALIGN;
+    }
 
     decode_save_opc(ctx);
     src1 = get_address(ctx, a->rs1, 0);
     func(dest, src1, src2, ctx->mem_idx, mop);
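The hunk above changes the alignment policy for AMOs: with Zama16b enabled, the translator tags the access with TCG's MO_ATOM_WITHIN16 atomicity hint instead of forcing MO_ALIGN, since the extension makes a misaligned AMO atomic as long as it stays inside one 16-byte granule. A standalone sketch of the same MemOp selection, with a hypothetical helper name (amo_mop is not in the commit; size_mop stands for the width/endianness bits such as MO_TESL or MO_TEUQ):

    /*
     * Sketch of the selection performed in gen_amo() above: relax the
     * alignment requirement to a within-16-byte atomicity hint when
     * Zama16b is enabled, otherwise keep the strict alignment check.
     */
    static MemOp amo_mop(bool zama16b, MemOp size_mop)
    {
        return size_mop | (zama16b ? MO_ATOM_WITHIN16 : MO_ALIGN);
    }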
@@ -126,55 +132,55 @@ static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
 static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESL);
 }
 
 static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESL);
 }
 
 static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESL);
 }
 
 static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESL);
 }
 
 static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESL);
 }
 
 static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESL);
 }
 
 static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESL);
 }
 
 static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESL);
 }
 
 static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESL);
 }
 
 static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
@@ -195,61 +201,61 @@ static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TEUQ);
 }
 
 static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TEUQ);
 }
 
 static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TEUQ);
 }
 
 static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TEUQ);
 }
 
 static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TEUQ);
 }
 
 static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TEUQ);
 }
 
 static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TEUQ);
 }
 
 static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TEUQ);
 }
 
 static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TEUQ);
 }
target/riscv/insn_trans/trans_rvd.c.inc

@@ -42,13 +42,18 @@
 static bool trans_fld(DisasContext *ctx, arg_fld *a)
 {
     TCGv addr;
+    MemOp memop = MO_TEUQ;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVD);
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
+
     decode_save_opc(ctx);
     addr = get_address(ctx, a->rs1, a->imm);
-    tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ);
+    tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop);
 
     mark_fs_dirty(ctx);
     return true;
@@ -57,13 +62,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
 static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
 {
     TCGv addr;
+    MemOp memop = MO_TEUQ;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVD);
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
+
     decode_save_opc(ctx);
     addr = get_address(ctx, a->rs1, a->imm);
-    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ);
+    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
     return true;
 }
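The same pattern recurs in the remaining hunks: the MO_ATOM_WITHIN16 hint is added only when `ctx->cur_insn_len != 2`, i.e. when the instruction is not a 2-byte compressed (RVC) encoding, since the commit message notes that Zama16b does not apply to RVC loads and stores. A sketch of that shared guard as a helper (apply_zama16b() is hypothetical; the committed code open-codes the check at each site):

    /*
     * Hypothetical refactoring of the guard repeated in the hunks above
     * and below: add the within-16-byte atomicity hint only when the
     * Zama16b extension is enabled and the current encoding is not a
     * compressed (2-byte) instruction.
     */
    static MemOp apply_zama16b(DisasContext *ctx, MemOp memop)
    {
        if (ctx->cfg_ptr->ext_zama16b && ctx->cur_insn_len != 2) {
            memop |= MO_ATOM_WITHIN16;
        }
        return memop;
    }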
target/riscv/insn_trans/trans_rvf.c.inc

@@ -43,14 +43,19 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
 {
     TCGv_i64 dest;
     TCGv addr;
+    MemOp memop = MO_TEUL;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
+
     decode_save_opc(ctx);
     addr = get_address(ctx, a->rs1, a->imm);
     dest = cpu_fpr[a->rd];
-    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
+    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop);
     gen_nanbox_s(dest, dest);
 
     mark_fs_dirty(ctx);
@@ -60,13 +65,18 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
 static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
 {
     TCGv addr;
+    MemOp memop = MO_TEUL;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
+
     decode_save_opc(ctx);
     addr = get_address(ctx, a->rs1, a->imm);
-    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);
+    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
     return true;
 }
target/riscv/translate.c

@@ -268,6 +268,9 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
 {
     bool out;
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
     decode_save_opc(ctx);
     if (get_xl(ctx) == MXL_RV128) {
         out = gen_load_i128(ctx, a, memop);
|
||||||
|
|
||||||
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
|
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
|
||||||
{
|
{
|
||||||
|
if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
|
||||||
|
memop |= MO_ATOM_WITHIN16;
|
||||||
|
}
|
||||||
decode_save_opc(ctx);
|
decode_save_opc(ctx);
|
||||||
if (get_xl(ctx) == MXL_RV128) {
|
if (get_xl(ctx) == MXL_RV128) {
|
||||||
return gen_store_i128(ctx, a, memop);
|
return gen_store_i128(ctx, a, memop);
|
||||||
|
|