/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    return C_NotImplemented;
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7,
    TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11,
    TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0,
};

/* No call arguments via registers. All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
    "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
};
#endif

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}
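/*
 * Note on the generated instruction format: every TCI instruction emitted
 * by the tcg_out_op_* helpers below is a single 32-bit word.  The opcode
 * occupies bits [0,8), register operands are packed into consecutive 4-bit
 * fields starting at bit 8, and immediates, conditions, extract/deposit
 * positions and memop indices are deposited into the remaining high bits
 * (6, 16 or 20 bits wide, depending on the format).
 */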
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}
static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    TCGOpcode op = INDEX_op_ld;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        op = INDEX_op_ld32u;
    }
    tcg_out_ldst(s, op, val, base, offset);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
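/*
 * Note: constants that do not fit in the signed 20-bit immediate field of
 * tci_movi are emitted as tci_movl and placed in the constant pool via
 * new_pool_label(); the 20-bit relocation handled by patch_reloc() above
 * later fills in the displacement of the pool entry.
 */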
static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    tcg_out_op_rrbb(s, INDEX_op_extract, rd, rs, pos, len);
}

static const TCGOutOpExtract outop_extract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tcg_out_extract,
};

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    tcg_out_op_rrbb(s, INDEX_op_sextract, rd, rs, pos, len);
}

static const TCGOutOpExtract outop_sextract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tcg_out_sextract,
};

static const TCGOutOpExtract2 outop_extract2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
{
    tcg_out_op_r(s, INDEX_op_goto_ptr, a0);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static TCGConstraintSetIndex cset_addsubcarry(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O1_I2(r, r, r) : C_NotImplemented;
}

static void tgen_addco(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_addco, a0, a1, a2);
}

static const TCGOutOpBinary outop_addco = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_addco,
};

static void tgen_addci(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_addci, a0, a1, a2);
}

static const TCGOutOpAddSubCarry outop_addci = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_addci,
};

static void tgen_addcio(TCGContext *s, TCGType type,
                        TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_addcio, a0, a1, a2);
}

static const TCGOutOpBinary outop_addcio = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_addcio,
};

static void tcg_out_set_carry(TCGContext *s)
{
    tcg_out_op_v(s, INDEX_op_tci_setcarry);
}

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};
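/*
 * Several of the operations below select a separate 32-bit opcode
 * (INDEX_op_tci_*32) when the operation is performed at TCG_TYPE_I32 on a
 * 64-bit host, because the interpreter always operates on full
 * TCG_TARGET_REG_BITS-wide register values.
 */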
static void tgen_clz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_clz32 : INDEX_op_clz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_clz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_clz,
};

static void tgen_ctz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_ctz32 : INDEX_op_ctz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_ctz,
};

static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
                         TCGReg a2, unsigned ofs, unsigned len)
{
    tcg_out_op_rrrbb(s, INDEX_op_deposit, a0, a1, a2, ofs, len);
}

static const TCGOutOpDeposit outop_deposit = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_deposit,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32 : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32 : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

#if TCG_TARGET_REG_BITS == 64
static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
{
    tcg_out_extract(s, TCG_TYPE_I64, a0, a1, 32, 32);
}

static const TCGOutOpUnary outop_extrh_i64_i32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_extrh_i64_i32,
};
#endif

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};
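/*
 * muls2/mulu2 produce a double-width product in two registers.  Like the
 * carry ops above, they are only offered at the native register width
 * (TCG_TYPE_REG) via the dynamic constraint below; mulsh/muluh are left
 * unimplemented.
 */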
static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
}

static void tgen_muls2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_muls2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_muls2,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_mulu2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_mulu2,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_rems(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rems32 : INDEX_op_rems);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rems,
};

static void tgen_remu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_remu32 : INDEX_op_remu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_remu,
};

static void tgen_rotl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotl32 : INDEX_op_rotl);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotl,
};
static void tgen_rotr(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotr32 : INDEX_op_rotr);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotr,
};

static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32s(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sar,
};

static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shl,
};

static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32u(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shr,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_subbo(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_subbo, a0, a1, a2);
}

static const TCGOutOpAddSubCarry outop_subbo = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_subbo,
};

static void tgen_subbi(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_subbi, a0, a1, a2);
}

static const TCGOutOpAddSubCarry outop_subbi = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_subbi,
};

static void tgen_subbio(TCGContext *s, TCGType type,
                        TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_subbio, a0, a1, a2);
}

static const TCGOutOpAddSubCarry outop_subbio = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_subbio,
};

static void tcg_out_set_borrow(TCGContext *s)
{
    tcg_out_op_v(s, INDEX_op_tci_setcarry);  /* borrow == carry */
}

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
}
static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
}

static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_ctpop,
    .out_rr = tgen_ctpop,
};

static void tgen_bswap16(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap16, a0, a1);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16);
    }
}

static const TCGOutOpBswap outop_bswap16 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap16,
};

static void tgen_bswap32(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap32, a0, a1);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 32);
    }
}

static const TCGOutOpBswap outop_bswap32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap32,
};

#if TCG_TARGET_REG_BITS == 64
static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_bswap64, a0, a1);
}

static const TCGOutOpUnary outop_bswap64 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap64,
};
#endif

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_setcond32 : INDEX_op_setcond);
    tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_setcond,
};

static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tgen_setcond(s, type, cond, dest, arg1, arg2);
    tgen_neg(s, type, dest, dest);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_negsetcond,
};
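/*
 * There is no two-register compare-and-branch instruction: brcond (and the
 * 32-bit pair comparisons below) are synthesized as a setcond into
 * TCG_REG_TMP followed by a single-register branch on that temporary.
 */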
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
                        TCGReg arg0, TCGReg arg1, TCGLabel *l)
{
    tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_rr = tgen_brcond,
};

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
                         TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_movcond32 : INDEX_op_movcond);
    tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
}

static const TCGOutOpMovcond outop_movcond = {
    .base.static_constraint = C_O1_I4(r, r, r, r, r),
    .out = tgen_movcond,
};

static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                         TCGArg bl, bool const_bl,
                         TCGArg bh, bool const_bh, TCGLabel *l)
{
    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                      al, ah, bl, bh, cond);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpBrcond2 outop_brcond2 = {
    .base.static_constraint = C_O0_I4(r, r, r, r),
    .out = tgen_brcond2,
};

static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                          TCGReg al, TCGReg ah,
                          TCGArg bl, bool const_bl,
                          TCGArg bh, bool const_bh)
{
    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, ret, al, ah, bl, bh, cond);
}

#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpSetcond2 outop_setcond2 = {
    .base.static_constraint = C_O1_I4(r, r, r, r, r),
    .out = tgen_setcond2,
};

static void tcg_out_mb(TCGContext *s, unsigned a0)
{
    tcg_out_op_v(s, INDEX_op_mb);
}

static void tcg_out_br(TCGContext *s, TCGLabel *l)
{
    tcg_out_op_l(s, INDEX_op_br, l);
}

static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
                      TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, INDEX_op_ld8u, dest, base, offset);
}

static const TCGOutOpLoad outop_ld8u = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld8u,
};

static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
                      TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, INDEX_op_ld8s, dest, base, offset);
}

static const TCGOutOpLoad outop_ld8s = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld8s,
};

static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, INDEX_op_ld16u, dest, base, offset);
}

static const TCGOutOpLoad outop_ld16u = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld16u,
};

static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, INDEX_op_ld16s, dest, base, offset);
}

static const TCGOutOpLoad outop_ld16s = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld16s,
};

#if TCG_TARGET_REG_BITS == 64
static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, INDEX_op_ld32u, dest, base, offset);
}

static const TCGOutOpLoad outop_ld32u = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld32u,
};

static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, INDEX_op_ld32s, dest, base, offset);
}

static const TCGOutOpLoad outop_ld32s = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld32s,
};
#endif

static void tgen_st8(TCGContext *s, TCGType type, TCGReg data,
                     TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, INDEX_op_st8, data, base, offset);
}

static const TCGOutOpStore outop_st8 = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_r = tgen_st8,
};

static void tgen_st16(TCGContext *s, TCGType type, TCGReg data,
                      TCGReg base, ptrdiff_t offset)
{
    tcg_out_ldst(s, INDEX_op_st16, data, base, offset);
}

static const TCGOutOpStore outop_st16 = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_r = tgen_st16,
};

static const TCGOutOpStore outop_st = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_r = tcg_out_st,
};
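/*
 * For the two-register guest memory accesses below (only provided on
 * 32-bit hosts), the four-register instruction format has no field left
 * for the MemOpIdx, so it is first loaded into TCG_REG_TMP and passed by
 * register.
 */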
static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
                         TCGReg addr, MemOpIdx oi)
{
    tcg_out_op_rrm(s, INDEX_op_qemu_ld, data, addr, oi);
}

static const TCGOutOpQemuLdSt outop_qemu_ld = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_qemu_ld,
};

static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
                          TCGReg datahi, TCGReg addr, MemOpIdx oi)
{
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, oi);
    tcg_out_op_rrrr(s, INDEX_op_qemu_ld2, datalo, datahi, addr, TCG_REG_TMP);
}

static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
    .base.static_constraint =
        TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r),
    .out = TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_ld2,
};

static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
                         TCGReg addr, MemOpIdx oi)
{
    tcg_out_op_rrm(s, INDEX_op_qemu_st, data, addr, oi);
}

static const TCGOutOpQemuLdSt outop_qemu_st = {
    .base.static_constraint = C_O0_I2(r, r),
    .out = tgen_qemu_st,
};

static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
                          TCGReg datahi, TCGReg addr, MemOpIdx oi)
{
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, oi);
    tcg_out_op_rrrr(s, INDEX_op_qemu_st2, datalo, datahi, addr, TCG_REG_TMP);
}

static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
    .base.static_constraint =
        TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(r, r, r),
    .out = TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_st2,
};

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    TCGOpcode op = INDEX_op_st;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        op = INDEX_op_st32;
    }
    tcg_out_ldst(s, op, val, base, offset);
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;

    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}