Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-07-31 06:13:53 -06:00)
qemu-thread: Use futex for QemuEvent on Windows
Use the futex-based implementation of QemuEvent on Windows to remove
code duplication and remove the overhead of event object construction
and destruction.

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Link: https://lore.kernel.org/r/20250526-event-v4-6-5b784cc8e1de@daynix.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d1895f4c17
commit 69e10db83e
7 changed files with 182 additions and 315 deletions
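The hunk below removes the Win32-event-based QemuEvent code; the shared futex-based implementation the commit switches to needs only an integer state word per event, so no kernel event handle has to be created or destroyed. As a rough illustration of that approach (not QEMU's actual code), the following sketch implements the same EV_SET/EV_FREE/EV_BUSY protocol described in the removed comment directly on top of Windows' futex-like WaitOnAddress()/WakeByAddressAll() primitives. The FutexEvent type and the fev_* names are invented for the example, error handling is omitted, and the program must be linked against Synchronization.lib (-lsynchronization with MinGW).

/* Sketch only: futex-style event on Windows, no per-event kernel object. */
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0602   /* Windows 8+, required for WaitOnAddress() */
#endif
#include <windows.h>
#include <stdbool.h>
#include <stdio.h>

#define EV_SET   0
#define EV_FREE  1
#define EV_BUSY -1

typedef struct {
    volatile LONG value;      /* the only per-event state: no HANDLE at all */
} FutexEvent;                 /* hypothetical name, for illustration only */

static void fev_init(FutexEvent *ev, bool init)
{
    ev->value = init ? EV_SET : EV_FREE;
}

static void fev_set(FutexEvent *ev)
{
    /* free->set or busy->set; the interlocked exchange is a full barrier. */
    if (InterlockedExchange(&ev->value, EV_SET) == EV_BUSY) {
        /* Waiters are parked in WaitOnAddress(); wake them all. */
        WakeByAddressAll((PVOID)&ev->value);
    }
}

static void fev_reset(FutexEvent *ev)
{
    /* set->free with an OR, a no-op if the event is already free or busy. */
    InterlockedOr(&ev->value, EV_FREE);
}

static void fev_wait(FutexEvent *ev)
{
    LONG value = ev->value;
    if (value == EV_FREE) {
        /* Advertise a waiter; if the event was set concurrently, we are done. */
        value = InterlockedCompareExchange(&ev->value, EV_BUSY, EV_FREE);
        if (value == EV_SET) {
            return;
        }
    }
    /* Sleep until the state word leaves EV_BUSY (a setter stored EV_SET). */
    LONG busy = EV_BUSY;
    while (ev->value == EV_BUSY) {
        WaitOnAddress(&ev->value, &busy, sizeof(ev->value), INFINITE);
    }
}

static FutexEvent ev;

static DWORD WINAPI setter(LPVOID arg)
{
    (void)arg;
    Sleep(100);               /* pretend to do some work first */
    fev_set(&ev);
    return 0;
}

int main(void)
{
    fev_init(&ev, false);
    HANDLE t = CreateThread(NULL, 0, setter, NULL, 0, NULL);
    fev_wait(&ev);            /* blocks until setter() flips the event */
    printf("event observed\n");
    WaitForSingleObject(t, INFINITE);
    CloseHandle(t);
    return 0;
}

The point of the design is visible in fev_set(): the wake call happens only when a waiter has advertised itself by moving the state to EV_BUSY, so the uncontended fast path is a single interlocked store and no system call.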
@@ -231,135 +231,6 @@ void qemu_sem_wait(QemuSemaphore *sem)
     }
 }
 
-/* Wrap a Win32 manual-reset event with a fast userspace path. The idea
- * is to reset the Win32 event lazily, as part of a test-reset-test-wait
- * sequence. Such a sequence is, indeed, how QemuEvents are used by
- * RCU and other subsystems!
- *
- * Valid transitions:
- * - free->set, when setting the event
- * - busy->set, when setting the event, followed by SetEvent
- * - set->free, when resetting the event
- * - free->busy, when waiting
- *
- * set->busy does not happen (it can be observed from the outside but
- * it really is set->free->busy).
- *
- * busy->free provably cannot happen; to enforce it, the set->free transition
- * is done with an OR, which becomes a no-op if the event has concurrently
- * transitioned to free or busy (and is faster than cmpxchg).
- */
-
-#define EV_SET         0
-#define EV_FREE        1
-#define EV_BUSY       -1
-
-void qemu_event_init(QemuEvent *ev, bool init)
-{
-    /* Manual reset. */
-    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
-    ev->value = (init ? EV_SET : EV_FREE);
-    ev->initialized = true;
-}
-
-void qemu_event_destroy(QemuEvent *ev)
-{
-    assert(ev->initialized);
-    ev->initialized = false;
-    CloseHandle(ev->event);
-}
-
-void qemu_event_set(QemuEvent *ev)
-{
-    assert(ev->initialized);
-
-    /*
-     * Pairs with both qemu_event_reset() and qemu_event_wait().
-     *
-     * qemu_event_set has release semantics, but because it *loads*
-     * ev->value we need a full memory barrier here.
-     */
-    smp_mb();
-    if (qatomic_read(&ev->value) != EV_SET) {
-        int old = qatomic_xchg(&ev->value, EV_SET);
-
-        /* Pairs with memory barrier after ResetEvent. */
-        smp_mb__after_rmw();
-        if (old == EV_BUSY) {
-            /* There were waiters, wake them up. */
-            SetEvent(ev->event);
-        }
-    }
-}
-
-void qemu_event_reset(QemuEvent *ev)
-{
-    assert(ev->initialized);
-
-    /*
-     * If there was a concurrent reset (or even reset+wait),
-     * do nothing. Otherwise change EV_SET->EV_FREE.
-     */
-    qatomic_or(&ev->value, EV_FREE);
-
-    /*
-     * Order reset before checking the condition in the caller.
-     * Pairs with the first memory barrier in qemu_event_set().
-     */
-    smp_mb__after_rmw();
-}
-
-void qemu_event_wait(QemuEvent *ev)
-{
-    unsigned value;
-
-    assert(ev->initialized);
-
-    /*
-     * qemu_event_wait must synchronize with qemu_event_set even if it does
-     * not go down the slow path, so this load-acquire is needed that
-     * synchronizes with the first memory barrier in qemu_event_set().
-     *
-     * If we do go down the slow path, there is no requirement at all: we
-     * might miss a qemu_event_set() here but ultimately the memory barrier in
-     * qemu_futex_wait() will ensure the check is done correctly.
-     */
-    value = qatomic_load_acquire(&ev->value);
-    if (value != EV_SET) {
-        if (value == EV_FREE) {
-            /*
-             * Here the underlying kernel event is reset, but qemu_event_set is
-             * not yet going to call SetEvent. However, there will be another
-             * check for EV_SET below when setting EV_BUSY. At that point it
-             * is safe to call WaitForSingleObject.
-             */
-            ResetEvent(ev->event);
-
-            /*
-             * It is not clear whether ResetEvent provides this barrier; kernel
-             * APIs (KeResetEvent/KeClearEvent) do not. Better safe than sorry!
-             */
-            smp_mb();
-
-            /*
-             * Leave the event reset and tell qemu_event_set that there are
-             * waiters. No need to retry, because there cannot be a concurrent
-             * busy->free transition. After the CAS, the event will be either
-             * set or busy.
-             */
-            if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
-                return;
-            }
-        }
-
-        /*
-         * ev->value is now EV_BUSY. Since we didn't observe EV_SET,
-         * qemu_event_set() must observe EV_BUSY and call SetEvent().
-         */
-        WaitForSingleObject(ev->event, INFINITE);
-    }
-}
-
 struct QemuThreadData {
     /* Passed to win32_start_routine. */
     void *(*start_routine)(void *);
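For reference, the removed comment's remark that QemuEvents are used in a test-reset-test-wait sequence (by RCU and other subsystems) refers to a caller-side pattern like the sketch below. It is not code from this commit: wait_for_work(), work_event and work_pending are made up for illustration, and the snippet assumes QEMU's usual qemu/osdep.h and qemu/thread.h headers for the qemu_event_* and qatomic_* helpers.

#include "qemu/osdep.h"
#include "qemu/thread.h"

static QemuEvent work_event;   /* initialised elsewhere with qemu_event_init() */
static bool work_pending;      /* hypothetical condition set by a producer */

static void wait_for_work(void)
{
    while (!qatomic_read(&work_pending)) {
        /* Test failed: arm the event... */
        qemu_event_reset(&work_event);
        /* ...re-test, so a producer that stored work_pending and called
         * qemu_event_set() between the first test and the reset is not lost... */
        if (qatomic_read(&work_pending)) {
            break;
        }
        /* ...and only then block. */
        qemu_event_wait(&work_event);
    }
}

Because the condition is re-tested after qemu_event_reset(), a producer that sets the flag and then calls qemu_event_set() can never be missed; this is the property the lazy-reset scheme above, and the futex-based implementation that replaces it, are both built around.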