Merge tag 'pull-tcg-20230829-2' of https://gitlab.com/rth7680/qemu into staging

softmmu: Use async_run_on_cpu in tcg_commit
tcg: Remove vecop_list check from tcg_gen_not_vec
tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32
Revert "include/exec: typedef abi_ptr to vaddr in softmmu"

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmTuOcYdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9gGwf/QzbRP8MknlCZXrkZ
# ykIZAkE4gRUb64Wzc2Oz1XaueltgBGRCS338oE8umtcLKTngZ8rVFD/LPTIEyAEY
# SOzdHEJLPMSUv54rjAV8W4mVku81E9QvzOgz8PIzFM0mDiPJ/lG6JBTee/IZJHr3
# cW9W/2XMEz2rS2ONPj7WXbVbk/1ao29JFlhcWKNauUfqrNWK+VWOpo2w5qfgJruz
# mjOSiMErU7SijytrKG9GP3Ri1JGskocfGcYYPofz8j6lmQoZrT6aYUj2tJTL8rvQ
# Js+JCP8ZCXFO8/2jJqOivQccBGmLi8wf6Ke777xE0tAqfzXqBOp4tvJbv28e8lja
# p+Lqhg==
# =KPna
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 29 Aug 2023 14:32:38 EDT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20230829-2' of https://gitlab.com/rth7680/qemu:
  Revert "include/exec: typedef abi_ptr to vaddr in softmmu"
  tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32
  tcg: Remove vecop_list check from tcg_gen_not_vec
  softmmu: Use async_run_on_cpu in tcg_commit
  softmmu: Assert data in bounds in iotlb_to_section

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit ef9d20c63b
7 changed files with 43 additions and 62 deletions
@@ -33,36 +33,6 @@ void cpu_loop_exit_noexc(CPUState *cpu)
     cpu_loop_exit(cpu);
 }
 
-#if defined(CONFIG_SOFTMMU)
-void cpu_reloading_memory_map(void)
-{
-    if (qemu_in_vcpu_thread() && current_cpu->running) {
-        /* The guest can in theory prolong the RCU critical section as long
-         * as it feels like. The major problem with this is that because it
-         * can do multiple reconfigurations of the memory map within the
-         * critical section, we could potentially accumulate an unbounded
-         * collection of memory data structures awaiting reclamation.
-         *
-         * Because the only thing we're currently protecting with RCU is the
-         * memory data structures, it's sufficient to break the critical section
-         * in this callback, which we know will get called every time the
-         * memory map is rearranged.
-         *
-         * (If we add anything else in the system that uses RCU to protect
-         * its data structures, we will need to implement some other mechanism
-         * to force TCG CPUs to exit the critical section, at which point this
-         * part of this callback might become unnecessary.)
-         *
-         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
-         * only protects cpu->as->dispatch. Since we know our caller is about
-         * to reload it, it's safe to split the critical section.
-         */
-        rcu_read_unlock();
-        rcu_read_lock();
-    }
-}
-#endif
-
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec.  */
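The comment removed above describes breaking an RCU read-side critical section (unlock, then immediately re-lock) so that deferred reclamation cannot pile up behind a long-lived reader. As a rough illustration only, not QEMU code, the standalone sketch below shows the same unlock/re-lock idea using the userspace-rcu (liburcu) library; the struct cfg type, the thread setup, and the loop bounds are invented for the example. Build with something like: gcc rcu_break.c -pthread -lurcu

#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>

struct cfg {
    int value;
};

static struct cfg *current_cfg;   /* RCU-protected pointer */

static void *reader(void *arg)
{
    rcu_register_thread();
    rcu_read_lock();                      /* long-lived read-side section */
    for (int i = 0; i < 100000; i++) {
        struct cfg *c = rcu_dereference(current_cfg);
        (void)c->value;                   /* use the protected data */
        if (i % 1024 == 0) {
            rcu_read_unlock();            /* split the critical section so */
            rcu_read_lock();              /* the updater can reclaim memory */
        }
    }
    rcu_read_unlock();
    rcu_unregister_thread();
    return NULL;
}

int main(void)
{
    pthread_t t;
    struct cfg *old, *next;

    rcu_register_thread();
    current_cfg = calloc(1, sizeof(*current_cfg));
    pthread_create(&t, NULL, reader, NULL);

    /* Updater: publish a new config, wait for readers, reclaim the old one.
     * synchronize_rcu() can only complete because the reader keeps dropping
     * out of its read-side critical section. */
    next = calloc(1, sizeof(*next));
    next->value = 1;
    old = current_cfg;
    rcu_assign_pointer(current_cfg, next);
    synchronize_rcu();
    free(old);

    pthread_join(t, NULL);
    rcu_unregister_thread();
    free(current_cfg);
    return 0;
}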
@@ -133,7 +133,6 @@ static inline void cpu_physical_memory_write(hwaddr addr,
 {
     cpu_physical_memory_rw(addr, (void *)buf, len, true);
 }
-void cpu_reloading_memory_map(void);
 void *cpu_physical_memory_map(hwaddr addr,
                               hwaddr *plen,
                               bool is_write);
@@ -121,8 +121,8 @@ static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
     h2g_nocheck(x); \
 })
 #else
-typedef vaddr abi_ptr;
-#define TARGET_ABI_FMT_ptr "%016" VADDR_PRIx
+typedef target_ulong abi_ptr;
+#define TARGET_ABI_FMT_ptr TARGET_FMT_lx
 #endif
 
 uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
@@ -680,8 +680,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
     IOMMUTLBEntry iotlb;
     int iommu_idx;
     hwaddr addr = orig_addr;
-    AddressSpaceDispatch *d =
-        qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
 
     for (;;) {
         section = address_space_translate_internal(d, addr, &addr, plen, false);
@@ -2412,10 +2411,16 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
-    AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
-    MemoryRegionSection *sections = d->map.sections;
+    AddressSpaceDispatch *d = cpuas->memory_dispatch;
+    int section_index = index & ~TARGET_PAGE_MASK;
+    MemoryRegionSection *ret;
 
-    return &sections[index & ~TARGET_PAGE_MASK];
+    assert(section_index < d->map.sections_nb);
+    ret = d->map.sections + section_index;
+    assert(ret->mr);
+    assert(ret->mr->ops);
+
+    return ret;
 }
 
 static void io_mem_init(void)
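The new iotlb_to_section() body above decodes the section index, asserts that it falls inside the populated part of the section map, and checks that the entry is fully initialised before returning it. Purely as an illustration of that decode, bounds-check, then dereference shape (the struct table type and lookup() function below are made up for the example, not QEMU API):

#include <assert.h>
#include <stddef.h>

struct entry {
    const void *ops;          /* must be set for a valid entry */
};

struct table {
    struct entry *entries;
    size_t nb;                /* number of populated entries */
};

struct entry *lookup(struct table *t, size_t encoded, size_t index_mask)
{
    size_t index = encoded & index_mask;   /* extract the index bits */
    struct entry *e;

    assert(index < t->nb);                 /* fail loudly on a bad value */
    e = &t->entries[index];
    assert(e->ops);                        /* entry must be initialised */
    return e;
}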
@@ -2481,23 +2486,42 @@ static void tcg_log_global_after_sync(MemoryListener *listener)
     }
 }
 
+static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
+{
+    CPUAddressSpace *cpuas = data.host_ptr;
+
+    cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
+    tlb_flush(cpu);
+}
+
 static void tcg_commit(MemoryListener *listener)
 {
     CPUAddressSpace *cpuas;
-    AddressSpaceDispatch *d;
+    CPUState *cpu;
 
     assert(tcg_enabled());
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
-    cpu_reloading_memory_map();
-    /* The CPU and TLB are protected by the iothread lock.
-     * We reload the dispatch pointer now because cpu_reloading_memory_map()
-     * may have split the RCU critical section.
+    cpu = cpuas->cpu;
+
+    /*
+     * Defer changes to as->memory_dispatch until the cpu is quiescent.
+     * Otherwise we race between (1) other cpu threads and (2) ongoing
+     * i/o for the current cpu thread, with data cached by mmu_lookup().
+     *
+     * In addition, queueing the work function will kick the cpu back to
+     * the main loop, which will end the RCU critical section and reclaim
+     * the memory data structures.
+     *
+     * That said, the listener is also called during realize, before
+     * all of the tcg machinery for run-on is initialized: thus halt_cond.
      */
-    d = address_space_to_dispatch(cpuas->as);
-    qatomic_rcu_set(&cpuas->memory_dispatch, d);
-    tlb_flush(cpuas->cpu);
+    if (cpu->halt_cond) {
+        async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+    } else {
+        tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+    }
 }
 
 static void memory_map_init(void)
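The hunk above stops patching cpuas->memory_dispatch in place under RCU and instead queues the update (and the TLB flush) to run on the affected vCPU thread once it is quiescent, with a direct call as fallback before the run-on machinery exists. The standalone sketch below illustrates that general pattern of posting work to the owning thread instead of mutating its state from outside, using plain pthreads; the worker/work_item types and the async_run_on_worker() name are invented for the example and are not QEMU's run_on_cpu API. Build with -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct worker;
typedef void (*work_fn)(struct worker *w, void *data);

struct work_item {
    work_fn fn;
    void *data;
    struct work_item *next;
};

struct worker {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    struct work_item *queue;
    bool stop;
    const char *dispatch;   /* only ever written on the worker thread */
};

/* May be called from any thread: push work onto the worker's list
 * instead of touching w->dispatch directly. */
static void async_run_on_worker(struct worker *w, work_fn fn, void *data)
{
    struct work_item *wi = malloc(sizeof(*wi));
    wi->fn = fn;
    wi->data = data;
    pthread_mutex_lock(&w->lock);
    wi->next = w->queue;
    w->queue = wi;
    pthread_cond_signal(&w->cond);
    pthread_mutex_unlock(&w->lock);
}

/* Runs on the worker thread only, between its normal "lookups". */
static void update_dispatch(struct worker *w, void *data)
{
    w->dispatch = data;
    printf("dispatch now points at \"%s\"\n", w->dispatch);
}

static void *worker_loop(void *arg)
{
    struct worker *w = arg;

    pthread_mutex_lock(&w->lock);
    while (!w->stop || w->queue) {
        while (w->queue) {
            struct work_item *wi = w->queue;
            w->queue = wi->next;
            pthread_mutex_unlock(&w->lock);
            wi->fn(w, wi->data);   /* safe point: no lookup is in flight */
            free(wi);
            pthread_mutex_lock(&w->lock);
        }
        /* ... the worker's normal lookups against w->dispatch go here ... */
        if (!w->stop) {
            pthread_cond_wait(&w->cond, &w->lock);
        }
    }
    pthread_mutex_unlock(&w->lock);
    return NULL;
}

int main(void)
{
    struct worker w = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
        .dispatch = "old map",
    };
    pthread_t t;

    pthread_create(&t, NULL, worker_loop, &w);
    async_run_on_worker(&w, update_dispatch, (void *)"new map");

    pthread_mutex_lock(&w.lock);
    w.stop = true;
    pthread_cond_signal(&w.cond);
    pthread_mutex_unlock(&w.lock);
    pthread_join(t, NULL);
    return 0;
}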
@@ -529,11 +529,6 @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_ext32u(s, rd, rs);
 }
 
-static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
-{
-    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
-}
-
 static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
 {
     return false;
@@ -1444,9 +1439,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_divu_i64:
         c = ARITH_UDIVX;
         goto gen_arith;
-    case INDEX_op_extrh_i64_i32:
-        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
-        break;
 
     case INDEX_op_brcond_i64:
         tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
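For reference, the removed case expanded INDEX_op_extrh_i64_i32, taking the high 32 bits of a 64-bit value, as a logical right shift by 32 (SRLX). A trivial standalone C equivalent of what that expansion computes, for illustration only:

#include <stdint.h>
#include <stdio.h>

static uint32_t extrh_i64_i32(uint64_t x)
{
    return (uint32_t)(x >> 32);   /* logical shift right by 32, keep low half */
}

int main(void)
{
    printf("0x%08x\n", extrh_i64_i32(0x1122334455667788ull)); /* 0x11223344 */
    return 0;
}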
@@ -1501,7 +1493,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext32u_i64:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_extrl_i64_i32:
     default:
         g_assert_not_reached();
     }
@@ -1533,8 +1524,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_ext32u_i64:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_extrl_i64_i32:
-    case INDEX_op_extrh_i64_i32:
     case INDEX_op_qemu_ld_a32_i32:
     case INDEX_op_qemu_ld_a64_i32:
     case INDEX_op_qemu_ld_a32_i64:
@@ -115,7 +115,7 @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
-#define TCG_TARGET_HAS_extr_i64_i32     1
+#define TCG_TARGET_HAS_extr_i64_i32     0
 #define TCG_TARGET_HAS_div_i64          1
 #define TCG_TARGET_HAS_rem_i64          0
 #define TCG_TARGET_HAS_rot_i64          0
@@ -391,12 +391,11 @@ static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
 
 void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
 {
-    const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
-
-    if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
+    if (TCG_TARGET_HAS_not_vec) {
+        vec_gen_op2(INDEX_op_not_vec, 0, r, a);
+    } else {
         tcg_gen_xor_vec(0, r, a, tcg_constant_vec_matching(r, 0, -1));
     }
-    tcg_swap_vecop_list(hold_list);
 }
 
 void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
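In the rewritten tcg_gen_not_vec() above, the fallback when the backend has no native vector NOT is an XOR with an all-ones constant, relying on the two's-complement identity ~x == x ^ -1. A standalone, element-wise C illustration of that identity (not TCG code; the not_vec() helper is invented for the example):

#include <assert.h>
#include <stdint.h>

static void not_vec(uint32_t *r, const uint32_t *a, int n)
{
    for (int i = 0; i < n; i++) {
        r[i] = a[i] ^ UINT32_MAX;   /* same result as ~a[i] */
    }
}

int main(void)
{
    uint32_t a[2] = { 0x00000000u, 0x12345678u }, r[2];

    not_vec(r, a, 2);
    assert(r[0] == 0xffffffffu);
    assert(r[1] == (uint32_t)~a[1]);
    return 0;
}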