Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-09 02:24:58 -06:00)
system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
The Big QEMU Lock (BQL) has many names and they are confusing. The actual QemuMutex variable is called qemu_global_mutex but it's commonly referred to as the BQL in discussions and some code comments. The locking APIs, however, are called qemu_mutex_lock_iothread() and qemu_mutex_unlock_iothread().

The "iothread" name is historic and comes from when the main thread was split into the KVM vcpu threads and the "iothread" (now called the main loop thread). I have contributed to the confusion myself by introducing a separate --object iothread, a separate concept unrelated to the BQL.

The "iothread" name is no longer appropriate for the BQL. Rename the locking APIs to:

- void bql_lock(void)
- void bql_unlock(void)
- bool bql_locked(void)

There are more APIs with "iothread" in their names. Subsequent patches will rename them. There are also comments and documentation that will be updated in later patches.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Acked-by: Fabiano Rosas <farosas@suse.de>
Acked-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Acked-by: Hyman Huang <yong.huang@smartx.com>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-id: 20240102153529.486531-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
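For reference, a minimal sketch of the calling pattern before and after this rename. do_work_under_bql() is a hypothetical caller used only for illustration; only bql_lock(), bql_unlock(), and bql_locked() come from the patch itself.

/* Minimal sketch, not part of the patch: old vs. new BQL locking APIs. */
static void do_work_under_bql(void)   /* hypothetical caller */
{
    /*
     * Before this commit:
     *     qemu_mutex_lock_iothread();
     *     ... access BQL-protected global state ...
     *     qemu_mutex_unlock_iothread();
     */

    /* After this commit: */
    bql_lock();
    g_assert(bql_locked());   /* the BQL is now held by this thread */
    /* ... access BQL-protected global state ... */
    bql_unlock();
}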
This commit is contained in:
parent 897a06c6d7
commit 195801d700

95 changed files with 529 additions and 529 deletions

@@ -41,7 +41,7 @@ void accel_blocker_init(void)
 
 void accel_ioctl_begin(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
 
@@ -51,7 +51,7 @@ void accel_ioctl_begin(void)
 
 void accel_ioctl_end(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
 
@@ -62,7 +62,7 @@ void accel_ioctl_end(void)
 
 void accel_cpu_ioctl_begin(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }
 
@@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)
 
 void accel_cpu_ioctl_end(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
        return;
     }
 
@@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
      * We allow to inhibit only when holding the BQL, so we can identify
      * when an inhibitor wants to issue an ioctl easily.
      */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
    /* Block further invocations of the ioctls outside the BQL. */
     CPU_FOREACH(cpu) {

@@ -24,7 +24,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
 
@@ -43,7 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
 
     do {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 #ifndef _WIN32
         do {
             int sig;
@@ -56,11 +56,11 @@ static void *dummy_cpu_thread_fn(void *arg)
 #else
         qemu_sem_wait(&cpu->sem);
 #endif
-        qemu_mutex_lock_iothread();
+        bql_lock();
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug);
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }

@@ -424,7 +424,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -449,7 +449,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     hvf_vcpu_destroy(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }

@@ -33,7 +33,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
 
@@ -58,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
 
     kvm_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }

@@ -806,7 +806,7 @@ static void kvm_dirty_ring_flush(void)
      * should always be with BQL held, serialization is guaranteed.
      * However, let's be sure of it.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     /*
      * First make sure to flush the hardware buffers by kicking all
      * vcpus out in a synchronous way.
@@ -1391,9 +1391,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
         trace_kvm_dirty_ring_reaper("wakeup");
         r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
 
-        qemu_mutex_lock_iothread();
+        bql_lock();
         kvm_dirty_ring_reap(s, NULL);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 
         r->reaper_iteration++;
     }
@@ -2817,7 +2817,7 @@ int kvm_cpu_exec(CPUState *cpu)
         return EXCP_HLT;
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     cpu_exec_start(cpu);
 
     do {
@@ -2857,11 +2857,11 @@ int kvm_cpu_exec(CPUState *cpu)
 
 #ifdef KVM_HAVE_MCE_INJECTION
         if (unlikely(have_sigbus_pending)) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                     pending_sigbus_addr);
             have_sigbus_pending = false;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif
 
@@ -2927,7 +2927,7 @@ int kvm_cpu_exec(CPUState *cpu)
              * still full. Got kicked by KVM_RESET_DIRTY_RINGS.
              */
             trace_kvm_dirty_ring_full(cpu->cpu_index);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             /*
              * We throttle vCPU by making it sleep once it exit from kernel
              * due to dirty ring full. In the dirtylimit scenario, reaping
@@ -2939,7 +2939,7 @@ int kvm_cpu_exec(CPUState *cpu)
             } else {
                 kvm_dirty_ring_reap(kvm_state, NULL);
             }
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             dirtylimit_vcpu_execute(cpu);
             ret = 0;
             break;
@@ -2956,9 +2956,9 @@ int kvm_cpu_exec(CPUState *cpu)
                 break;
             case KVM_SYSTEM_EVENT_CRASH:
                 kvm_cpu_synchronize_state(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 ret = 0;
                 break;
             default:
@@ -2973,7 +2973,7 @@ int kvm_cpu_exec(CPUState *cpu)
     } while (ret == 0);
 
     cpu_exec_end(cpu);
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (ret < 0) {
         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);

@@ -558,8 +558,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
         tcg_ctx->gen_tb = NULL;
     }
 #endif
-    if (qemu_mutex_iothread_locked()) {
-        qemu_mutex_unlock_iothread();
+    if (bql_locked()) {
+        bql_unlock();
     }
     assert_no_pages_locked();
 }
@@ -680,10 +680,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
 #if defined(TARGET_I386)
         if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
             X86CPU *x86_cpu = X86_CPU(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             apic_poll_irq(x86_cpu->apic_state);
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif /* TARGET_I386 */
         if (!cpu_has_work(cpu)) {
@@ -749,9 +749,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
 #else
         if (replay_exception()) {
             CPUClass *cc = CPU_GET_CLASS(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             cc->tcg_ops->do_interrupt(cpu);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             cpu->exception_index = -1;
 
             if (unlikely(cpu->singlestep_enabled)) {
@@ -812,7 +812,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
 
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
-        qemu_mutex_lock_iothread();
+        bql_lock();
         interrupt_request = cpu->interrupt_request;
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
@@ -821,7 +821,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
             cpu->exception_index = EXCP_DEBUG;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if !defined(CONFIG_USER_ONLY)
@@ -832,7 +832,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
             cpu->halted = 1;
             cpu->exception_index = EXCP_HLT;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if defined(TARGET_I386)
@@ -843,14 +843,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
             do_cpu_init(x86_cpu);
             cpu->exception_index = EXCP_HALTED;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #else
         else if (interrupt_request & CPU_INTERRUPT_RESET) {
             replay_interrupt();
             cpu_reset(cpu);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #endif /* !TARGET_I386 */
@@ -873,7 +873,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
              */
             if (unlikely(cpu->singlestep_enabled)) {
                 cpu->exception_index = EXCP_DEBUG;
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 return true;
             }
             cpu->exception_index = -1;
@@ -892,7 +892,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         }
 
         /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
     /* Finally, check if we need to exit to the main loop. */

@@ -2030,10 +2030,10 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
                           type, ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -2054,12 +2054,12 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset);
     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return int128_make128(b, a);
 }
@@ -2577,10 +2577,10 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
                           ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -2601,12 +2601,12 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
                     mmu_idx, ra, mr, mr_offset);
     ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
                           size - 8, mmu_idx, ra, mr, mr_offset + 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }

@@ -126,9 +126,9 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
          * We're called without the iothread lock, so must take it while
          * we're calling timer handlers.
          */
-        qemu_mutex_lock_iothread();
+        bql_lock();
         icount_notify_aio_contexts();
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }
 

@@ -76,7 +76,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu.notifier);
     tcg_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -91,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
     do {
         if (cpu_can_run(cpu)) {
             int r;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             r = tcg_cpus_exec(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             switch (r) {
             case EXCP_DEBUG:
                 cpu_handle_guest_debug(cpu);
@@ -105,9 +105,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
                  */
                 break;
             case EXCP_ATOMIC:
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 cpu_exec_step_atomic(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
             default:
                 /* Ignore everything else? */
                 break;
@@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     tcg_cpus_destroy(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_remove_force_rcu_notifier(&force_rcu.notifier);
     rcu_unregister_thread();
     return NULL;

@@ -188,7 +188,7 @@ static void *rr_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu);
     tcg_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg)
         /* Only used for icount_enabled() */
         int64_t cpu_budget = 0;
 
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         replay_mutex_lock();
-        qemu_mutex_lock_iothread();
+        bql_lock();
 
         if (icount_enabled()) {
             int cpu_count = rr_cpu_count();
@@ -254,7 +254,7 @@ static void *rr_cpu_thread_fn(void *arg)
         if (cpu_can_run(cpu)) {
             int r;
 
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             if (icount_enabled()) {
                 icount_prepare_for_run(cpu, cpu_budget);
             }
@@ -262,15 +262,15 @@ static void *rr_cpu_thread_fn(void *arg)
             if (icount_enabled()) {
                 icount_process_data(cpu);
             }
-            qemu_mutex_lock_iothread();
+            bql_lock();
 
             if (r == EXCP_DEBUG) {
                 cpu_handle_guest_debug(cpu);
                 break;
             } else if (r == EXCP_ATOMIC) {
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 cpu_exec_step_atomic(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
                 break;
             }
         } else if (cpu->stop) {

@@ -88,7 +88,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
 /* mask must never be zero, except for A20 change call */
 void tcg_handle_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     cpu->interrupt_request |= mask;
 

@@ -649,7 +649,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
 
 void cpu_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     cpu->interrupt_request |= mask;
     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 }