qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a C11 atomic type
pointer argument. QEMU uses direct types (int, etc) and this causes a
compiler error when QEMU code calls these functions in a source file
that also includes <stdatomic.h> via a system header file:

  $ CC=clang CXX=clang++ ./configure ... && make
  ../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)

Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h and
<stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.

This patch was generated using:

  $ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
    sort -u >/tmp/changed_identifiers
  $ for identifier in $(</tmp/changed_identifiers); do
        sed -i "s%\<$identifier\>%q$identifier%g" \
            $(git grep -I -l "\<$identifier\>")
    done

I manually fixed line-wrap issues and misaligned rST tables.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
parent ed7db34b5a
commit d73415a315
133 changed files with 1041 additions and 1018 deletions
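The collision described in the commit message can be reproduced outside QEMU with a few lines of standalone C. The sketch below is illustrative only: the qatomic_set()/qatomic_read() macros are hypothetical stand-ins built directly on the GCC/clang __atomic builtins, which accept plain (non-_Atomic) objects; QEMU's real definitions live in include/qemu/atomic.h and differ in detail. It shows why a q-prefixed namespace can coexist with <stdatomic.h> while the old atomic_*() names could not.

  /*
   * Minimal standalone sketch, not QEMU code: qatomic_set()/qatomic_read()
   * here are simplified stand-ins for the renamed macros.
   */
  #include <stdatomic.h>
  #include <stdio.h>

  #define qatomic_set(ptr, val) __atomic_store_n(ptr, val, __ATOMIC_RELAXED)
  #define qatomic_read(ptr)     __atomic_load_n(ptr, __ATOMIC_RELAXED)

  int main(void)
  {
      unsigned int plain = 0;  /* plain type, as QEMU uses */
      atomic_uint c11 = 0;     /* C11 _Atomic type */

      qatomic_set(&plain, 1);     /* builtin accepts a plain pointer */
      atomic_fetch_add(&c11, 1);  /* C11 function needs an _Atomic object */
      /*
       * atomic_fetch_add(&plain, 1) would fail under clang with "address
       * argument to atomic operation must be a pointer to _Atomic type".
       */
      printf("%u %u\n", qatomic_read(&plain), (unsigned int)atomic_load(&c11));
      return 0;
  }

Compiling this with clang and enabling the commented-out call reproduces the error quoted above; the q-prefixed names sidestep the namespace entirely.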
@@ -2379,7 +2379,7 @@ static __thread bool have_sigbus_pending;
 
 static void kvm_cpu_kick(CPUState *cpu)
 {
-    atomic_set(&cpu->kvm_run->immediate_exit, 1);
+    qatomic_set(&cpu->kvm_run->immediate_exit, 1);
 }
 
 static void kvm_cpu_kick_self(void)
@@ -2400,7 +2400,7 @@ static void kvm_eat_signals(CPUState *cpu)
     int r;
 
     if (kvm_immediate_exit) {
-        atomic_set(&cpu->kvm_run->immediate_exit, 0);
+        qatomic_set(&cpu->kvm_run->immediate_exit, 0);
         /* Write kvm_run->immediate_exit before the cpu->exit_request
          * write in kvm_cpu_exec.
          */
@@ -2434,7 +2434,7 @@ int kvm_cpu_exec(CPUState *cpu)
     DPRINTF("kvm_cpu_exec()\n");
 
     if (kvm_arch_process_async_events(cpu)) {
-        atomic_set(&cpu->exit_request, 0);
+        qatomic_set(&cpu->exit_request, 0);
         return EXCP_HLT;
     }
 
@@ -2450,7 +2450,7 @@ int kvm_cpu_exec(CPUState *cpu)
         }
 
         kvm_arch_pre_run(cpu, run);
-        if (atomic_read(&cpu->exit_request)) {
+        if (qatomic_read(&cpu->exit_request)) {
             DPRINTF("interrupt exit requested\n");
             /*
              * KVM requires us to reenter the kernel after IO exits to complete
@@ -2577,7 +2577,7 @@ int kvm_cpu_exec(CPUState *cpu)
         vm_stop(RUN_STATE_INTERNAL_ERROR);
     }
 
-    atomic_set(&cpu->exit_request, 0);
+    qatomic_set(&cpu->exit_request, 0);
     return ret;
 }
 
@@ -2994,7 +2994,7 @@ int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
     have_sigbus_pending = true;
    pending_sigbus_addr = addr;
    pending_sigbus_code = code;
-    atomic_set(&cpu->exit_request, 1);
+    qatomic_set(&cpu->exit_request, 1);
     return 0;
 #else
     return 1;

@@ -83,7 +83,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 #if DATA_SIZE == 16
     ret = atomic16_cmpxchg(haddr, cmpv, newv);
 #else
-    ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
+    ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
 #endif
     ATOMIC_MMU_CLEANUP;
     atomic_trace_rmw_post(env, addr, info);
@@ -131,7 +131,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                                          ATOMIC_MMU_IDX);
 
     atomic_trace_rmw_pre(env, addr, info);
-    ret = atomic_xchg__nocheck(haddr, val);
+    ret = qatomic_xchg__nocheck(haddr, val);
     ATOMIC_MMU_CLEANUP;
     atomic_trace_rmw_post(env, addr, info);
     return ret;
@@ -147,7 +147,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
                                          ATOMIC_MMU_IDX); \
     atomic_trace_rmw_pre(env, addr, info); \
-    ret = atomic_##X(haddr, val); \
+    ret = qatomic_##X(haddr, val); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, info); \
     return ret; \
@@ -182,10 +182,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                                          ATOMIC_MMU_IDX); \
     atomic_trace_rmw_pre(env, addr, info); \
     smp_mb(); \
-    cmp = atomic_read__nocheck(haddr); \
+    cmp = qatomic_read__nocheck(haddr); \
     do { \
         old = cmp; new = FN(old, val); \
-        cmp = atomic_cmpxchg__nocheck(haddr, old, new); \
+        cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
     } while (cmp != old); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, info); \
@@ -230,7 +230,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 #if DATA_SIZE == 16
     ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
 #else
-    ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
+    ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
 #endif
     ATOMIC_MMU_CLEANUP;
     atomic_trace_rmw_post(env, addr, info);
@@ -280,7 +280,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                                          ATOMIC_MMU_IDX);
 
     atomic_trace_rmw_pre(env, addr, info);
-    ret = atomic_xchg__nocheck(haddr, BSWAP(val));
+    ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
     ATOMIC_MMU_CLEANUP;
     atomic_trace_rmw_post(env, addr, info);
     return BSWAP(ret);
@@ -296,7 +296,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
                                          false, ATOMIC_MMU_IDX); \
     atomic_trace_rmw_pre(env, addr, info); \
-    ret = atomic_##X(haddr, BSWAP(val)); \
+    ret = qatomic_##X(haddr, BSWAP(val)); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, info); \
     return BSWAP(ret); \
@@ -329,10 +329,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                                          false, ATOMIC_MMU_IDX); \
     atomic_trace_rmw_pre(env, addr, info); \
     smp_mb(); \
-    ldn = atomic_read__nocheck(haddr); \
+    ldn = qatomic_read__nocheck(haddr); \
     do { \
         ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
-        ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
+        ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
     } while (ldo != ldn); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, info); \

@@ -367,7 +367,8 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
         goto out_unlock_next;
     }
     /* Atomically claim the jump destination slot only if it was NULL */
-    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
+    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
+                          (uintptr_t)tb_next);
     if (old) {
         goto out_unlock_next;
     }
@@ -407,7 +408,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
         tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
         mmap_unlock();
         /* We add the TB in the virtual pc hash table for the fast lookup */
-        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
+        qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
     }
 #ifndef CONFIG_USER_ONLY
     /* We don't take care of direct jumps when address mapping changes in
@@ -536,9 +537,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * Ensure zeroing happens before reading cpu->exit_request or
      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
      */
-    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
 
-    if (unlikely(atomic_read(&cpu->interrupt_request))) {
+    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
         qemu_mutex_lock_iothread();
         interrupt_request = cpu->interrupt_request;
@@ -613,10 +614,10 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     }
 
     /* Finally, check if we need to exit to the main loop. */
-    if (unlikely(atomic_read(&cpu->exit_request))
+    if (unlikely(qatomic_read(&cpu->exit_request))
         || (use_icount
             && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
-        atomic_set(&cpu->exit_request, 0);
+        qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {
             cpu->exception_index = EXCP_INTERRUPT;
         }
@@ -642,7 +643,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     }
 
     *last_tb = NULL;
-    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
+    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
     if (insns_left < 0) {
         /* Something asked us to stop executing chained TBs; just
          * continue round the main loop. Whatever requested the exit

@@ -312,9 +312,9 @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
     CPU_FOREACH(cpu) {
         CPUArchState *env = cpu->env_ptr;
 
-        full += atomic_read(&env_tlb(env)->c.full_flush_count);
-        part += atomic_read(&env_tlb(env)->c.part_flush_count);
-        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
+        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
+        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
+        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
     }
     *pfull = full;
     *ppart = part;
@@ -349,13 +349,13 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
     cpu_tb_jmp_cache_clear(cpu);
 
     if (to_clean == ALL_MMUIDX_BITS) {
-        atomic_set(&env_tlb(env)->c.full_flush_count,
+        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
     } else {
-        atomic_set(&env_tlb(env)->c.part_flush_count,
+        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
         if (to_clean != asked) {
-            atomic_set(&env_tlb(env)->c.elide_flush_count,
+            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
         }
@@ -693,7 +693,7 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
  * generated code.
  *
  * Other vCPUs might be reading their TLBs during guest execution, so we update
- * te->addr_write with atomic_set. We don't need to worry about this for
+ * te->addr_write with qatomic_set. We don't need to worry about this for
  * oversized guests as MTTCG is disabled for them.
  *
  * Called with tlb_c.lock held.
@@ -711,7 +711,7 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
 #if TCG_OVERSIZED_GUEST
         tlb_entry->addr_write |= TLB_NOTDIRTY;
 #else
-        atomic_set(&tlb_entry->addr_write,
+        qatomic_set(&tlb_entry->addr_write,
                    tlb_entry->addr_write | TLB_NOTDIRTY);
 #endif
     }
@@ -1138,8 +1138,8 @@ static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
 #if TCG_OVERSIZED_GUEST
     return *(target_ulong *)((uintptr_t)entry + ofs);
 #else
-    /* ofs might correspond to .addr_write, so use atomic_read */
-    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
+    /* ofs might correspond to .addr_write, so use qatomic_read */
+    return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
 #endif
 }
 
@@ -1155,11 +1155,11 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
         target_ulong cmp;
 
-        /* elt_ofs might correspond to .addr_write, so use atomic_read */
+        /* elt_ofs might correspond to .addr_write, so use qatomic_read */
 #if TCG_OVERSIZED_GUEST
         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
 #else
-        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
+        cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
 #endif
 
         if (cmp == page) {

@@ -65,7 +65,7 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
     } else {
-        atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
         if (use_icount &&
             !cpu->can_do_io
             && (mask & ~old_mask) != 0) {

@@ -377,9 +377,9 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     restore_state_to_opc(env, tb, data);
 
 #ifdef CONFIG_PROFILER
-    atomic_set(&prof->restore_time,
+    qatomic_set(&prof->restore_time,
                prof->restore_time + profile_getclock() - ti);
-    atomic_set(&prof->restore_count, prof->restore_count + 1);
+    qatomic_set(&prof->restore_count, prof->restore_count + 1);
 #endif
     return 0;
 }
@@ -509,7 +509,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 
     /* Level 2..N-1. */
     for (i = v_l2_levels; i > 0; i--) {
-        void **p = atomic_rcu_read(lp);
+        void **p = qatomic_rcu_read(lp);
 
         if (p == NULL) {
             void *existing;
@@ -518,7 +518,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
                 return NULL;
             }
             p = g_new0(void *, V_L2_SIZE);
-            existing = atomic_cmpxchg(lp, NULL, p);
+            existing = qatomic_cmpxchg(lp, NULL, p);
             if (unlikely(existing)) {
                 g_free(p);
                 p = existing;
@@ -528,7 +528,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
     }
 
-    pd = atomic_rcu_read(lp);
+    pd = qatomic_rcu_read(lp);
     if (pd == NULL) {
         void *existing;
 
@@ -545,7 +545,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
             }
         }
 #endif
-        existing = atomic_cmpxchg(lp, NULL, pd);
+        existing = qatomic_cmpxchg(lp, NULL, pd);
         if (unlikely(existing)) {
 #ifndef CONFIG_USER_ONLY
             {
@@ -1253,7 +1253,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
     tcg_region_reset_all();
     /* XXX: flush processor icache at this point if cache flush is
        expensive */
-    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
+    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
 
 done:
     mmap_unlock();
@@ -1265,7 +1265,7 @@ done:
 void tb_flush(CPUState *cpu)
 {
     if (tcg_enabled()) {
-        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
+        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
 
         if (cpu_in_exclusive_context(cpu)) {
             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
@@ -1358,7 +1358,7 @@ static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
     int n;
 
     /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
-    ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
+    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
     dest = (TranslationBlock *)(ptr & ~1);
     if (dest == NULL) {
         return;
@@ -1369,7 +1369,7 @@ static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
      * While acquiring the lock, the jump might have been removed if the
      * destination TB was invalidated; check again.
      */
-    ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
+    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
     if (ptr_locked != ptr) {
         qemu_spin_unlock(&dest->jmp_lock);
         /*
@@ -1415,7 +1415,7 @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
 
     TB_FOR_EACH_JMP(dest, tb, n) {
         tb_reset_jump(tb, n);
-        atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
+        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
         /* No need to clear the list entry; setting the dest ptr is enough */
     }
     dest->jmp_list_head = (uintptr_t)NULL;
@@ -1439,7 +1439,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
 
     /* make sure no further incoming jumps will be chained to this TB */
     qemu_spin_lock(&tb->jmp_lock);
-    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
+    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
     qemu_spin_unlock(&tb->jmp_lock);
 
     /* remove the TB from the hash list */
@@ -1466,8 +1466,8 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
     CPU_FOREACH(cpu) {
-        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
-            atomic_set(&cpu->tb_jmp_cache[h], NULL);
+        if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
+            qatomic_set(&cpu->tb_jmp_cache[h], NULL);
         }
     }
 
@@ -1478,7 +1478,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
     /* suppress any remaining jumps to this TB */
     tb_jmp_unlink(tb);
 
-    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
+    qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
                tcg_ctx->tb_phys_invalidate_count + 1);
 }
 
@@ -1733,7 +1733,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 
 #ifdef CONFIG_PROFILER
     /* includes aborted translations because of exceptions */
-    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
+    qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
     ti = profile_getclock();
 #endif
 
@@ -1758,8 +1758,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     }
 
 #ifdef CONFIG_PROFILER
-    atomic_set(&prof->tb_count, prof->tb_count + 1);
-    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
+    qatomic_set(&prof->tb_count, prof->tb_count + 1);
+    qatomic_set(&prof->interm_time,
+                prof->interm_time + profile_getclock() - ti);
     ti = profile_getclock();
 #endif
 
@@ -1804,10 +1805,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->tc.size = gen_code_size;
 
 #ifdef CONFIG_PROFILER
-    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
-    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
-    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
-    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
+    qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
+    qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
+    qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
+    qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
 #endif
 
 #ifdef DEBUG_DISAS
@@ -1869,7 +1870,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     }
 #endif
 
-    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
+    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                  CODE_GEN_ALIGN));
 
@@ -1905,7 +1906,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
 
         orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
-        atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
+        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
         tb_destroy(tb);
         return existing_tb;
     }
@@ -2273,7 +2274,7 @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
 
     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
-        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
+        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
     }
 }
 
@@ -2393,7 +2394,7 @@ void dump_exec_info(void)
 
     qemu_printf("\nStatistics:\n");
     qemu_printf("TB flush count %u\n",
-                atomic_read(&tb_ctx.tb_flush_count));
+                qatomic_read(&tb_ctx.tb_flush_count));
     qemu_printf("TB invalidate count %zu\n",
                 tcg_tb_phys_invalidate_count());
 
@@ -2415,7 +2416,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
 {
     g_assert(qemu_mutex_iothread_locked());
     cpu->interrupt_request |= mask;
-    atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
 }
 
 /*