Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-05 00:33:55 -06:00)

Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging

* futex: support Windows
* qemu-thread: Avoid futex abstraction for non-Linux
* migration, hw/display/apple-gfx: replace QemuSemaphore with QemuEvent
* rust: bindings for Error
* hpet, rust/hpet: return errors from realize if properties are incorrect
* rust/hpet: Drop BqlCell wrapper for num_timers
* target/i386: Emulate ftz and denormal flag bits correctly
* i386/kvm: Prefault memory on page state change

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCgAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmhC4AgUHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroP09wf+K9e0TaaZRxTsw7WU9pXsDoYPzTLd
# F5CkBZPY770X1JW75f8Xw5qKczI0t6s26eFK1NUZxYiDVWzW/lZT6hreCUQSwzoS
# b0wlAgPW+bV5dKlKI2wvnadrgDvroj4p560TS+bmRftiu2P0ugkHHtIJNIQ+byUQ
# sWdhKlUqdOXakMrC4H4wDyIgRbK4CLsRMbnBHBUENwNJYJm39bwlicybbagpUxzt
# w4mgjbMab0jbAd2hVq8n+A+1sKjrroqOtrhQLzEuMZ0VAwocwuP2Adm6gBu9kdHV
# tpa8RLopninax3pWVUHnypHX780jkZ8E7zk9ohaaK36NnWTF4W/Z41EOLw==
# =Vs6V
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 06 Jun 2025 08:33:12 EDT
# gpg: using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg: issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu: (31 commits)
  tests/tcg/x86_64/fma: add test for exact-denormal output
  target/i386: Wire up MXCSR.DE and FPUS.DE correctly
  target/i386: Use correct type for get_float_exception_flags() values
  target/i386: Detect flush-to-zero after rounding
  hw/display/apple-gfx: Replace QemuSemaphore with QemuEvent
  migration/postcopy: Replace QemuSemaphore with QemuEvent
  migration/colo: Replace QemuSemaphore with QemuEvent
  migration: Replace QemuSemaphore with QemuEvent
  qemu-thread: Document QemuEvent
  qemu-thread: Use futex if available for QemuLockCnt
  qemu-thread: Use futex for QemuEvent on Windows
  qemu-thread: Avoid futex abstraction for non-Linux
  qemu-thread: Replace __linux__ with CONFIG_LINUX
  futex: Support Windows
  futex: Check value after qemu_futex_wait()
  i386/kvm: Prefault memory on page state change
  rust: make TryFrom macro more resilient
  docs: update Rust module status
  rust/hpet: Drop BqlCell wrapper for num_timers
  rust/hpet: return errors from realize if properties are incorrect
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

commit 96215036f4
48 changed files with 1018 additions and 488 deletions

@@ -99,6 +99,7 @@ bool kvm_allowed;
 bool kvm_readonly_mem_allowed;
 bool kvm_vm_attributes_allowed;
 bool kvm_msi_use_devid;
+bool kvm_pre_fault_memory_supported;
 static bool kvm_has_guest_debug;
 static int kvm_sstep_flags;
 static bool kvm_immediate_exit;
@@ -2745,6 +2746,7 @@ static int kvm_init(MachineState *ms)
         kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
         kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
         (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
+    kvm_pre_fault_memory_supported = kvm_vm_check_extension(s, KVM_CAP_PRE_FAULT_MEMORY);

     if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
         s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;

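For context, kvm_pre_fault_memory_supported gates use of the kernel's
KVM_PRE_FAULT_MEMORY vCPU ioctl. A rough, illustrative sketch of a caller
(the helper name, locals and error handling are assumptions, not part of this
series; real code retries on partial progress as the kernel API docs require):

    /* Illustrative only: pre-fault one guest-physical range on a vCPU. */
    static void prefault_one_range(CPUState *cpu, uint64_t gpa, uint64_t size)
    {
        struct kvm_pre_fault_memory pre_fault = {
            .gpa = gpa,
            .size = size,
        };

        if (!kvm_pre_fault_memory_supported) {
            return;   /* older kernels: pages are simply faulted in lazily */
        }
        kvm_vcpu_ioctl(cpu, KVM_PRE_FAULT_MEMORY, &pre_fault);
    }
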
@@ -96,6 +96,11 @@ are missing:
   architecture (VMState). Right now, VMState lacks type safety because
   it is hard to place the ``VMStateField`` definitions in traits.

+* NUL-terminated file names with ``#[track_caller]`` are scheduled for
+  inclusion as ``#![feature(location_file_nul)]``, but it will be a while
+  before QEMU can use them. For now, there is special code in
+  ``util/error.c`` to support non-NUL-terminated file names.
+
 * associated const equality would be nice to have for some users of
   ``callbacks::FnCall``, but is still experimental. ``ASSERT_IS_SOME``
   replaces it.
@@ -155,10 +160,10 @@ module status
 ``callbacks``        complete
 ``cell``             stable
 ``errno``            complete
+``error``            stable
 ``irq``              complete
 ``memory``           stable
 ``module``           complete
-``offset_of``        stable
 ``qdev``             stable
 ``qom``              stable
 ``sysbus``           stable

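The new ``error`` row corresponds to the qemu_api error bindings introduced
later in this series. A minimal sketch of what they enable (the device type
and its field are hypothetical; only the qemu_api::Result plumbing is real):

    use qemu_api::Result;

    struct MyDevice {
        num_ports: u32,
    }

    impl MyDevice {
        // A realize() returning qemu_api::Result<()> can use `?` on a plain
        // &str; the bindings convert it into a C `Error *` at the FFI boundary.
        fn realize(&self) -> Result<()> {
            if self.num_ports == 0 {
                Err("num-ports must be at least 1")?;
            }
            Ok(())
        }
    }
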
@@ -454,7 +454,7 @@ static void set_cursor_glyph(void *opaque)
 /* ------ DMA (device reading system memory) ------ */

 typedef struct AppleGFXReadMemoryJob {
-    QemuSemaphore sem;
+    QemuEvent event;
     hwaddr physical_address;
     uint64_t length;
     void *dst;
@@ -470,7 +470,7 @@ static void apple_gfx_do_read_memory(void *opaque)
                              job->dst, job->length, MEMTXATTRS_UNSPECIFIED);
     job->success = (r == MEMTX_OK);

-    qemu_sem_post(&job->sem);
+    qemu_event_set(&job->event);
 }

 static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address,
@@ -483,11 +483,11 @@ static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address,
     trace_apple_gfx_read_memory(physical_address, length, dst);

     /* Performing DMA requires BQL, so do it in a BH. */
-    qemu_sem_init(&job.sem, 0);
+    qemu_event_init(&job.event, 0);
     aio_bh_schedule_oneshot(qemu_get_aio_context(),
                             apple_gfx_do_read_memory, &job);
-    qemu_sem_wait(&job.sem);
-    qemu_sem_destroy(&job.sem);
+    qemu_event_wait(&job.event);
+    qemu_event_destroy(&job.event);
     return job.success;
 }

@@ -328,16 +328,16 @@ static const VMStateDescription vmstate_hpet_timer = {
 static const VMStateDescription vmstate_hpet = {
     .name = "hpet",
     .version_id = 2,
-    .minimum_version_id = 1,
+    .minimum_version_id = 2,
     .pre_save = hpet_pre_save,
     .post_load = hpet_post_load,
     .fields = (const VMStateField[]) {
         VMSTATE_UINT64(config, HPETState),
         VMSTATE_UINT64(isr, HPETState),
         VMSTATE_UINT64(hpet_counter, HPETState),
-        VMSTATE_UINT8_V(num_timers_save, HPETState, 2),
+        VMSTATE_UINT8(num_timers_save, HPETState),
         VMSTATE_VALIDATE("num_timers must match", hpet_validate_num_timers),
-        VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers, 0,
+        VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers_save, 0,
                                     vmstate_hpet_timer, HPETTimer),
         VMSTATE_END_OF_LIST()
     },
@@ -691,8 +691,14 @@ static void hpet_realize(DeviceState *dev, Error **errp)
     int i;
     HPETTimer *timer;

+    if (s->num_timers < HPET_MIN_TIMERS || s->num_timers > HPET_MAX_TIMERS) {
+        error_setg(errp, "hpet.num_timers must be between %d and %d",
+                   HPET_MIN_TIMERS, HPET_MAX_TIMERS);
+        return;
+    }
     if (!s->intcap) {
-        warn_report("Hpet's intcap not initialized");
+        error_setg(errp, "hpet.hpet-intcap not initialized");
+        return;
     }
     if (hpet_fw_cfg.count == UINT8_MAX) {
         /* first instance */
@@ -700,7 +706,7 @@ static void hpet_realize(DeviceState *dev, Error **errp)
     }

     if (hpet_fw_cfg.count == 8) {
-        error_setg(errp, "Only 8 instances of HPET is allowed");
+        error_setg(errp, "Only 8 instances of HPET are allowed");
         return;
     }

@@ -710,11 +716,6 @@ static void hpet_realize(DeviceState *dev, Error **errp)
         sysbus_init_irq(sbd, &s->irqs[i]);
     }

-    if (s->num_timers < HPET_MIN_TIMERS) {
-        s->num_timers = HPET_MIN_TIMERS;
-    } else if (s->num_timers > HPET_MAX_TIMERS) {
-        s->num_timers = HPET_MAX_TIMERS;
-    }
     for (i = 0; i < HPET_MAX_TIMERS; i++) {
         timer = &s->timer[i];
         timer->qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, hpet_timer, timer);

include/qapi/error-internal.h (new file)
@@ -0,0 +1,35 @@
+/*
+ * QEMU Error Objects - struct definition
+ *
+ * Copyright IBM, Corp. 2011
+ * Copyright (C) 2011-2015 Red Hat, Inc.
+ *
+ * Authors:
+ *  Anthony Liguori <aliguori@us.ibm.com>
+ *  Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2. See
+ * the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QAPI_ERROR_INTERNAL_H
+
+struct Error
+{
+    char *msg;
+    ErrorClass err_class;
+
+    /* Used for error_abort only, may be NULL. */
+    const char *func;
+
+    /*
+     * src might be NUL-terminated or not. If it is, src_len is negative.
+     * If it is not, src_len is the length.
+     */
+    const char *src;
+    int src_len;
+    int line;
+    GString *hint;
+};
+
+#endif

@@ -1,5 +1,5 @@
 /*
- * Wrappers around Linux futex syscall
+ * Wrappers around Linux futex syscall and similar
  *
  * Copyright Red Hat, Inc. 2017
  *
@@ -11,17 +11,35 @@
  *
  */

+/*
+ * Note that a wake-up can also be caused by common futex usage patterns in
+ * unrelated code that happened to have previously used the futex word's
+ * memory location (e.g., typical futex-based implementations of Pthreads
+ * mutexes can cause this under some conditions). Therefore, qemu_futex_wait()
+ * callers should always conservatively assume that it is a spurious wake-up,
+ * and use the futex word's value (i.e., the user-space synchronization scheme)
+ * to decide whether to continue to block or not.
+ */
+
 #ifndef QEMU_FUTEX_H
 #define QEMU_FUTEX_H

+#define HAVE_FUTEX
+
+#ifdef CONFIG_LINUX
 #include <sys/syscall.h>
 #include <linux/futex.h>

 #define qemu_futex(...) syscall(__NR_futex, __VA_ARGS__)

-static inline void qemu_futex_wake(void *f, int n)
+static inline void qemu_futex_wake_all(void *f)
 {
-    qemu_futex(f, FUTEX_WAKE, n, NULL, NULL, 0);
+    qemu_futex(f, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
+}
+
+static inline void qemu_futex_wake_single(void *f)
+{
+    qemu_futex(f, FUTEX_WAKE, 1, NULL, NULL, 0);
 }

 static inline void qemu_futex_wait(void *f, unsigned val)
@@ -37,5 +55,25 @@ static inline void qemu_futex_wait(void *f, unsigned val)
         }
     }
 }
+#elif defined(CONFIG_WIN32)
+#include <synchapi.h>
+
+static inline void qemu_futex_wake_all(void *f)
+{
+    WakeByAddressAll(f);
+}
+
+static inline void qemu_futex_wake_single(void *f)
+{
+    WakeByAddressSingle(f);
+}
+
+static inline void qemu_futex_wait(void *f, unsigned val)
+{
+    WaitOnAddress(f, &val, sizeof(val), INFINITE);
+}
+#else
+#undef HAVE_FUTEX
+#endif

 #endif /* QEMU_FUTEX_H */

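The new comment makes the contract explicit: any return from qemu_futex_wait()
may be spurious, so the caller has to re-check the futex word. A minimal
sketch of that pattern (the `flag` word and both helpers are illustrative,
not part of the patch):

    static unsigned flag;   /* futex word: 0 = not signalled, 1 = signalled */

    static void wait_for_flag(void)
    {
        while (qatomic_read(&flag) == 0) {
            /* Sleep only while the word still holds the value we saw... */
            qemu_futex_wait(&flag, 0);
            /* ...and treat every wake-up as possibly spurious: re-check. */
        }
    }

    static void signal_flag(void)
    {
        qatomic_set(&flag, 1);
        qemu_futex_wake_all(&flag);
    }
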
@@ -17,7 +17,7 @@
 typedef struct QemuLockCnt QemuLockCnt;

 struct QemuLockCnt {
-#ifndef CONFIG_LINUX
+#ifndef HAVE_FUTEX
     QemuMutex mutex;
 #endif
     unsigned count;

@@ -32,15 +32,6 @@ struct QemuSemaphore {
     unsigned int count;
 };

-struct QemuEvent {
-#ifndef __linux__
-    pthread_mutex_t lock;
-    pthread_cond_t cond;
-#endif
-    unsigned value;
-    bool initialized;
-};
-
 struct QemuThread {
     pthread_t thread;
 };

@@ -28,12 +28,6 @@ struct QemuSemaphore {
     bool initialized;
 };

-struct QemuEvent {
-    int value;
-    HANDLE event;
-    bool initialized;
-};
-
 typedef struct QemuThreadData QemuThreadData;
 struct QemuThread {
     QemuThreadData *data;

@@ -3,13 +3,32 @@

 #include "qemu/processor.h"
 #include "qemu/atomic.h"
+#include "qemu/futex.h"

 typedef struct QemuCond QemuCond;
 typedef struct QemuSemaphore QemuSemaphore;
-typedef struct QemuEvent QemuEvent;
 typedef struct QemuLockCnt QemuLockCnt;
 typedef struct QemuThread QemuThread;

+/*
+ * QemuEvent
+ * =========
+ *
+ * QemuEvent is an implementation of Win32 manual-reset event object.
+ * For details, refer to:
+ * https://learn.microsoft.com/en-us/windows/win32/sync/using-event-objects
+ *
+ * QemuEvent is more lightweight than QemuSemaphore when HAVE_FUTEX is defined.
+ */
+typedef struct QemuEvent {
+#ifndef HAVE_FUTEX
+    pthread_mutex_t lock;
+    pthread_cond_t cond;
+#endif
+    unsigned value;
+    bool initialized;
+} QemuEvent;
+
 #ifdef _WIN32
 #include "qemu/thread-win32.h"
 #else

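The conversions later in this series (apple-gfx, COLO, postcopy, migration)
all use the same one-shot shape; a minimal sketch of the QemuEvent lifecycle
(the surrounding producer/consumer code is illustrative):

    static QemuEvent done;

    static void producer(void)
    {
        /* ... finish the work ... */
        qemu_event_set(&done);              /* latch the manual-reset event */
    }

    static void consumer(void)
    {
        qemu_event_init(&done, false);      /* start out not signalled */
        /* ... kick off the work that eventually calls producer() ... */
        qemu_event_wait(&done);             /* returns once qemu_event_set() ran */
        qemu_event_destroy(&done);
    }
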
@@ -42,6 +42,7 @@ extern bool kvm_gsi_routing_allowed;
 extern bool kvm_gsi_direct_mapping;
 extern bool kvm_readonly_mem_allowed;
 extern bool kvm_msi_use_devid;
+extern bool kvm_pre_fault_memory_supported;

 #define kvm_enabled() (kvm_allowed)
 /**

@@ -838,11 +838,18 @@ emulator_link_args = []
 midl = not_found
 widl = not_found
 pathcch = not_found
+synchronization = not_found
 host_dsosuf = '.so'
 if host_os == 'windows'
   midl = find_program('midl', required: false)
   widl = find_program('widl', required: false)
   pathcch = cc.find_library('pathcch')
+  synchronization = cc.find_library('Synchronization', required: false)
+  if not synchronization.found()
+    # The library name is lowercase on mingw
+    synchronization = cc.find_library('synchronization', required: true)
+  endif
+
   socket = cc.find_library('ws2_32')
   winmm = cc.find_library('winmm')

@@ -146,7 +146,7 @@ static void secondary_vm_do_failover(void)
         return;
     }
     /* Notify COLO incoming thread that failover work is finished */
-    qemu_sem_post(&mis->colo_incoming_sem);
+    qemu_event_set(&mis->colo_incoming_event);

     /* For Secondary VM, jump to incoming co */
     if (mis->colo_incoming_co) {
@@ -195,7 +195,7 @@ static void primary_vm_do_failover(void)
     }

     /* Notify COLO thread that failover work is finished */
-    qemu_sem_post(&s->colo_exit_sem);
+    qemu_event_set(&s->colo_exit_event);
 }

 COLOMode get_colo_mode(void)
@@ -620,8 +620,8 @@ out:
     }

     /* Hope this not to be too long to wait here */
-    qemu_sem_wait(&s->colo_exit_sem);
-    qemu_sem_destroy(&s->colo_exit_sem);
+    qemu_event_wait(&s->colo_exit_event);
+    qemu_event_destroy(&s->colo_exit_event);

     /*
      * It is safe to unregister notifier after failover finished.
@@ -651,7 +651,7 @@ void migrate_start_colo_process(MigrationState *s)
     s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST,
                                        colo_checkpoint_notify_timer, NULL);

-    qemu_sem_init(&s->colo_exit_sem, 0);
+    qemu_event_init(&s->colo_exit_event, false);
     colo_process_checkpoint(s);
     bql_lock();
 }
@@ -808,11 +808,11 @@ void colo_shutdown(void)
     case COLO_MODE_PRIMARY:
         s = migrate_get_current();
         qemu_event_set(&s->colo_checkpoint_event);
-        qemu_sem_post(&s->colo_exit_sem);
+        qemu_event_set(&s->colo_exit_event);
         break;
     case COLO_MODE_SECONDARY:
         mis = migration_incoming_get_current();
-        qemu_sem_post(&mis->colo_incoming_sem);
+        qemu_event_set(&mis->colo_incoming_event);
         break;
     default:
         break;
@@ -827,7 +827,7 @@ static void *colo_process_incoming_thread(void *opaque)
     Error *local_err = NULL;

     rcu_register_thread();
-    qemu_sem_init(&mis->colo_incoming_sem, 0);
+    qemu_event_init(&mis->colo_incoming_event, false);

     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                       MIGRATION_STATUS_COLO);
@@ -923,8 +923,8 @@ out:
     }

     /* Hope this not to be too long to loop here */
-    qemu_sem_wait(&mis->colo_incoming_sem);
-    qemu_sem_destroy(&mis->colo_incoming_sem);
+    qemu_event_wait(&mis->colo_incoming_event);
+    qemu_event_destroy(&mis->colo_incoming_event);

     rcu_unregister_thread();
     return NULL;

@@ -1630,7 +1630,7 @@ void migration_cancel(void)
         }
         /* If the migration is paused, kick it out of the pause */
         if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
-            qemu_sem_post(&s->pause_sem);
+            qemu_event_set(&s->pause_event);
         }
         migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
     } while (s->state != MIGRATION_STATUS_CANCELLING);
@@ -2342,7 +2342,7 @@ void qmp_migrate_continue(MigrationStatus state, Error **errp)
                   MigrationStatus_str(s->state));
        return;
     }
-    qemu_sem_post(&s->pause_sem);
+    qemu_event_set(&s->pause_event);
 }

 int migration_rp_wait(MigrationState *s)
@@ -2911,21 +2911,18 @@ static bool migration_switchover_prepare(MigrationState *s)
         return true;
     }

-    /* Since leaving this state is not atomic with posting the semaphore
+    /*
+     * Since leaving this state is not atomic with setting the event
      * it's possible that someone could have issued multiple migrate_continue
-     * and the semaphore is incorrectly positive at this point;
-     * the docs say it's undefined to reinit a semaphore that's already
-     * init'd, so use timedwait to eat up any existing posts.
+     * and the event is incorrectly set at this point so reset it.
      */
-    while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
-        /* This block intentionally left blank */
-    }
+    qemu_event_reset(&s->pause_event);

     /* Update [POSTCOPY_]ACTIVE to PRE_SWITCHOVER */
     migrate_set_state(&s->state, s->state, MIGRATION_STATUS_PRE_SWITCHOVER);
     bql_unlock();

-    qemu_sem_wait(&s->pause_sem);
+    qemu_event_wait(&s->pause_event);

     bql_lock();
     /*
@@ -4057,7 +4054,7 @@ static void migration_instance_finalize(Object *obj)
     qemu_mutex_destroy(&ms->qemu_file_lock);
     qemu_sem_destroy(&ms->wait_unplug_sem);
     qemu_sem_destroy(&ms->rate_limit_sem);
-    qemu_sem_destroy(&ms->pause_sem);
+    qemu_event_destroy(&ms->pause_event);
     qemu_sem_destroy(&ms->postcopy_pause_sem);
     qemu_sem_destroy(&ms->rp_state.rp_sem);
     qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
@@ -4072,7 +4069,7 @@ static void migration_instance_init(Object *obj)
     ms->state = MIGRATION_STATUS_NONE;
     ms->mbps = -1;
     ms->pages_per_second = -1;
-    qemu_sem_init(&ms->pause_sem, 0);
+    qemu_event_init(&ms->pause_event, false);
     qemu_mutex_init(&ms->error_mutex);

     migrate_params_init(&ms->parameters);

@@ -98,9 +98,9 @@ struct MigrationIncomingState {
     void (*transport_cleanup)(void *data);
     /*
      * Used to sync thread creations. Note that we can't create threads in
-     * parallel with this sem.
+     * parallel with this event.
      */
-    QemuSemaphore thread_sync_sem;
+    QemuEvent thread_sync_event;
     /*
      * Free at the start of the main state load, set as the main thread finishes
      * loading state.
@@ -186,7 +186,7 @@ struct MigrationIncomingState {

     /* The coroutine we should enter (back) after failover */
     Coroutine *colo_incoming_co;
-    QemuSemaphore colo_incoming_sem;
+    QemuEvent colo_incoming_event;

     /* Optional load threads pool and its thread exit request flag */
     ThreadPool *load_threads;
@@ -379,10 +379,10 @@ struct MigrationState {
     QemuSemaphore wait_unplug_sem;

     /* Migration is paused due to pause-before-switchover */
-    QemuSemaphore pause_sem;
+    QemuEvent pause_event;

-    /* The semaphore is used to notify COLO thread that failover is finished */
-    QemuSemaphore colo_exit_sem;
+    /* The event is used to notify COLO thread that failover is finished */
+    QemuEvent colo_exit_event;

     /* The event is used to notify COLO thread to do checkpoint */
     QemuEvent colo_checkpoint_event;

@@ -90,10 +90,10 @@ void postcopy_thread_create(MigrationIncomingState *mis,
                             QemuThread *thread, const char *name,
                             void *(*fn)(void *), int joinable)
 {
-    qemu_sem_init(&mis->thread_sync_sem, 0);
+    qemu_event_init(&mis->thread_sync_event, false);
     qemu_thread_create(thread, name, fn, mis, joinable);
-    qemu_sem_wait(&mis->thread_sync_sem);
-    qemu_sem_destroy(&mis->thread_sync_sem);
+    qemu_event_wait(&mis->thread_sync_event);
+    qemu_event_destroy(&mis->thread_sync_event);
 }

 /* Postcopy needs to detect accesses to pages that haven't yet been copied
@@ -964,7 +964,7 @@ static void *postcopy_ram_fault_thread(void *opaque)
     trace_postcopy_ram_fault_thread_entry();
     rcu_register_thread();
     mis->last_rb = NULL; /* last RAMBlock we sent part of */
-    qemu_sem_post(&mis->thread_sync_sem);
+    qemu_event_set(&mis->thread_sync_event);

     struct pollfd *pfd;
     size_t pfd_len = 2 + mis->postcopy_remote_fds->len;
@@ -1716,7 +1716,7 @@ void *postcopy_preempt_thread(void *opaque)

     rcu_register_thread();

-    qemu_sem_post(&mis->thread_sync_sem);
+    qemu_event_set(&mis->thread_sync_event);

     /*
      * The preempt channel is established in asynchronous way. Wait
@@ -2078,7 +2078,7 @@ static void *postcopy_ram_listen_thread(void *opaque)

     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                       MIGRATION_STATUS_POSTCOPY_ACTIVE);
-    qemu_sem_post(&mis->thread_sync_sem);
+    qemu_event_set(&mis->thread_sync_event);
     trace_postcopy_ram_listen_thread_start();

     rcu_register_thread();

rust/Cargo.lock (generated)
@@ -2,6 +2,12 @@
 # It is not intended for manual editing.
 version = 3

+[[package]]
+name = "anyhow"
+version = "1.0.98"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
+
 [[package]]
 name = "arbitrary-int"
 version = "1.2.7"
@@ -44,6 +50,15 @@ version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b"

+[[package]]
+name = "foreign"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17ca1b5be8c9d320daf386f1809c7acc0cb09accbae795c2001953fa50585846"
+dependencies = [
+ "libc",
+]
+
 [[package]]
 name = "hpet"
 version = "0.1.0"
@@ -114,6 +129,8 @@ dependencies = [
 name = "qemu_api"
 version = "0.1.0"
 dependencies = [
+ "anyhow",
+ "foreign",
  "libc",
  "qemu_api_macros",
 ]

@@ -67,6 +67,7 @@ missing_safety_doc = "deny"
 mut_mut = "deny"
 needless_bitwise_bool = "deny"
 needless_pass_by_ref_mut = "deny"
+needless_update = "deny"
 no_effect_underscore_binding = "deny"
 option_option = "deny"
 or_fun_call = "deny"

@@ -175,7 +175,7 @@ impl DeviceImpl for PL011State {
     fn vmsd() -> Option<&'static VMStateDescription> {
         Some(&device_class::VMSTATE_PL011)
     }
-    const REALIZE: Option<fn(&Self)> = Some(Self::realize);
+    const REALIZE: Option<fn(&Self) -> qemu_api::Result<()>> = Some(Self::realize);
 }

 impl ResettablePhasesImpl for PL011State {
@@ -619,9 +619,10 @@ impl PL011State {
         }
     }

-    fn realize(&self) {
+    fn realize(&self) -> qemu_api::Result<()> {
         self.char_backend
             .enable_handlers(self, Self::can_receive, Self::receive, Self::event);
+        Ok(())
     }

     fn reset_hold(&self, _type: ResetType) {

@@ -12,7 +12,7 @@ use std::{
 use qemu_api::{
     bindings::{
         address_space_memory, address_space_stl_le, qdev_prop_bit, qdev_prop_bool,
-        qdev_prop_uint32, qdev_prop_uint8,
+        qdev_prop_uint32, qdev_prop_usize,
     },
     cell::{BqlCell, BqlRefCell},
     irq::InterruptSource,
@@ -36,9 +36,9 @@ use crate::fw_cfg::HPETFwConfig;
 const HPET_REG_SPACE_LEN: u64 = 0x400; // 1024 bytes

 /// Minimum recommended hardware implementation.
-const HPET_MIN_TIMERS: u8 = 3;
+const HPET_MIN_TIMERS: usize = 3;
 /// Maximum timers in each timer block.
-const HPET_MAX_TIMERS: u8 = 32;
+const HPET_MAX_TIMERS: usize = 32;

 /// Flags that HPETState.flags supports.
 const HPET_FLAG_MSI_SUPPORT_SHIFT: usize = 0;
@@ -561,8 +561,8 @@ pub struct HPETState {

     /// HPET timer array managed by this timer block.
     #[doc(alias = "timer")]
-    timers: [BqlRefCell<HPETTimer>; HPET_MAX_TIMERS as usize],
-    num_timers: BqlCell<u8>,
+    timers: [BqlRefCell<HPETTimer>; HPET_MAX_TIMERS],
+    num_timers: usize,
     num_timers_save: BqlCell<u8>,

     /// Instance id (HPET timer block ID).
@@ -570,11 +570,6 @@ pub struct HPETState {
 }

 impl HPETState {
-    // Get num_timers with `usize` type, which is useful to play with array index.
-    fn get_num_timers(&self) -> usize {
-        self.num_timers.get().into()
-    }
-
     const fn has_msi_flag(&self) -> bool {
         self.flags & (1 << HPET_FLAG_MSI_SUPPORT_SHIFT) != 0
     }
@@ -636,7 +631,7 @@ impl HPETState {
         self.hpet_offset
             .set(ticks_to_ns(self.counter.get()) - CLOCK_VIRTUAL.get_ns());

-        for timer in self.timers.iter().take(self.get_num_timers()) {
+        for timer in self.timers.iter().take(self.num_timers) {
             let mut t = timer.borrow_mut();

             if t.is_int_enabled() && t.is_int_active() {
@@ -648,7 +643,7 @@ impl HPETState {
         // Halt main counter and disable interrupt generation.
         self.counter.set(self.get_ticks());

-        for timer in self.timers.iter().take(self.get_num_timers()) {
+        for timer in self.timers.iter().take(self.num_timers) {
             timer.borrow_mut().del_timer();
         }
     }
@@ -671,7 +666,7 @@ impl HPETState {
         let new_val = val << shift;
         let cleared = new_val & self.int_status.get();

-        for (index, timer) in self.timers.iter().take(self.get_num_timers()).enumerate() {
+        for (index, timer) in self.timers.iter().take(self.num_timers).enumerate() {
             if cleared & (1 << index) != 0 {
                 timer.borrow_mut().update_irq(false);
             }
@@ -724,19 +719,17 @@ impl HPETState {
         }
     }

-    fn realize(&self) {
+    fn realize(&self) -> qemu_api::Result<()> {
+        if self.num_timers < HPET_MIN_TIMERS || self.num_timers > HPET_MAX_TIMERS {
+            Err(format!(
+                "hpet.num_timers must be between {HPET_MIN_TIMERS} and {HPET_MAX_TIMERS}"
+            ))?;
+        }
         if self.int_route_cap == 0 {
-            // TODO: Add error binding: warn_report()
-            println!("Hpet's hpet-intcap property not initialized");
+            Err("hpet.hpet-intcap property not initialized")?;
         }

-        self.hpet_id.set(HPETFwConfig::assign_hpet_id());
+        self.hpet_id.set(HPETFwConfig::assign_hpet_id()?);

-        if self.num_timers.get() < HPET_MIN_TIMERS {
-            self.num_timers.set(HPET_MIN_TIMERS);
-        } else if self.num_timers.get() > HPET_MAX_TIMERS {
-            self.num_timers.set(HPET_MAX_TIMERS);
-        }
-
         self.init_timer();
         // 64-bit General Capabilities and ID Register; LegacyReplacementRoute.
@@ -745,16 +738,17 @@ impl HPETState {
             1 << HPET_CAP_COUNT_SIZE_CAP_SHIFT |
             1 << HPET_CAP_LEG_RT_CAP_SHIFT |
             HPET_CAP_VENDER_ID_VALUE << HPET_CAP_VENDER_ID_SHIFT |
-            ((self.get_num_timers() - 1) as u64) << HPET_CAP_NUM_TIM_SHIFT | // indicate the last timer
+            ((self.num_timers - 1) as u64) << HPET_CAP_NUM_TIM_SHIFT | // indicate the last timer
             (HPET_CLK_PERIOD * FS_PER_NS) << HPET_CAP_CNT_CLK_PERIOD_SHIFT, // 10 ns
         );

         self.init_gpio_in(2, HPETState::handle_legacy_irq);
         self.init_gpio_out(from_ref(&self.pit_enabled));
+        Ok(())
     }

     fn reset_hold(&self, _type: ResetType) {
-        for timer in self.timers.iter().take(self.get_num_timers()) {
+        for timer in self.timers.iter().take(self.num_timers) {
             timer.borrow_mut().reset();
         }

@@ -782,7 +776,7 @@ impl HPETState {
             GlobalRegister::try_from(addr).map(HPETRegister::Global)
         } else {
             let timer_id: usize = ((addr - 0x100) / 0x20) as usize;
-            if timer_id <= self.get_num_timers() {
+            if timer_id < self.num_timers {
                 // TODO: Add trace point - trace_hpet_ram_[read|write]_timer_id(timer_id)
                 TimerRegister::try_from(addr & 0x18)
                     .map(|reg| HPETRegister::Timer(&self.timers[timer_id], reg))
@@ -853,12 +847,12 @@ impl HPETState {
      * also added to the migration stream. Check that it matches the value
      * that was configured.
      */
-        self.num_timers_save.set(self.num_timers.get());
+        self.num_timers_save.set(self.num_timers as u8);
         0
     }

     fn post_load(&self, _version_id: u8) -> i32 {
-        for timer in self.timers.iter().take(self.get_num_timers()) {
+        for timer in self.timers.iter().take(self.num_timers) {
             let mut t = timer.borrow_mut();

             t.cmp64 = t.calculate_cmp64(t.get_state().counter.get(), t.cmp);
@@ -883,7 +877,7 @@ impl HPETState {
     }

     fn validate_num_timers(&self, _version_id: u8) -> bool {
-        self.num_timers.get() == self.num_timers_save.get()
+        self.num_timers == self.num_timers_save.get().into()
     }
 }

@@ -910,7 +904,7 @@ qemu_api::declare_properties! {
         c"timers",
         HPETState,
         num_timers,
-        unsafe { &qdev_prop_uint8 },
+        unsafe { &qdev_prop_usize },
         u8,
         default = HPET_MIN_TIMERS
     ),
@@ -1015,16 +1009,16 @@ const VALIDATE_TIMERS_NAME: &CStr = c"num_timers must match";
 static VMSTATE_HPET: VMStateDescription = VMStateDescription {
     name: c"hpet".as_ptr(),
     version_id: 2,
-    minimum_version_id: 1,
+    minimum_version_id: 2,
     pre_save: Some(hpet_pre_save),
     post_load: Some(hpet_post_load),
     fields: vmstate_fields! {
         vmstate_of!(HPETState, config),
         vmstate_of!(HPETState, int_status),
         vmstate_of!(HPETState, counter),
-        vmstate_of!(HPETState, num_timers_save).with_version_id(2),
+        vmstate_of!(HPETState, num_timers_save),
         vmstate_validate!(HPETState, VALIDATE_TIMERS_NAME, HPETState::validate_num_timers),
-        vmstate_struct!(HPETState, timers[0 .. num_timers], &VMSTATE_HPET_TIMER, BqlRefCell<HPETTimer>, HPETState::validate_num_timers).with_version_id(0),
+        vmstate_struct!(HPETState, timers[0 .. num_timers_save], &VMSTATE_HPET_TIMER, BqlRefCell<HPETTimer>, HPETState::validate_num_timers).with_version_id(0),
     },
     subsections: vmstate_subsections! {
         VMSTATE_HPET_RTC_IRQ_LEVEL,
@@ -1042,7 +1036,7 @@ impl DeviceImpl for HPETState {
         Some(&VMSTATE_HPET)
     }

-    const REALIZE: Option<fn(&Self)> = Some(Self::realize);
+    const REALIZE: Option<fn(&Self) -> qemu_api::Result<()>> = Some(Self::realize);
 }

 impl ResettablePhasesImpl for HPETState {

@@ -36,7 +36,7 @@ pub static mut hpet_fw_cfg: HPETFwConfig = HPETFwConfig {
 };

 impl HPETFwConfig {
-    pub(crate) fn assign_hpet_id() -> usize {
+    pub(crate) fn assign_hpet_id() -> Result<usize, &'static str> {
         assert!(bql_locked());
         // SAFETY: all accesses go through these methods, which guarantee
         // that the accesses are protected by the BQL.
@@ -48,13 +48,12 @@ impl HPETFwConfig {
         }

         if fw_cfg.count == 8 {
-            // TODO: Add error binding: error_setg()
-            panic!("Only 8 instances of HPET is allowed");
+            Err("Only 8 instances of HPET are allowed")?;
         }

         let id: usize = fw_cfg.count.into();
         fw_cfg.count += 1;
-        id
+        Ok(id)
     }

     pub(crate) fn update_hpet_cfg(hpet_id: usize, timer_block_id: u32, address: u64) {

@@ -1,9 +1,13 @@
+subproject('anyhow-1-rs', required: true)
 subproject('bilge-0.2-rs', required: true)
 subproject('bilge-impl-0.2-rs', required: true)
+subproject('foreign-0.3-rs', required: true)
 subproject('libc-0.2-rs', required: true)

+anyhow_rs = dependency('anyhow-1-rs')
 bilge_rs = dependency('bilge-0.2-rs')
 bilge_impl_rs = dependency('bilge-impl-0.2-rs')
+foreign_rs = dependency('foreign-0.3-rs')
 libc_rs = dependency('libc-0.2-rs')

 subproject('proc-macro2-1-rs', required: true)

@@ -203,8 +203,8 @@ fn derive_tryinto_body(
     Ok(quote! {
         #(const #discriminants: #repr = #name::#discriminants as #repr;)*;
         match value {
-            #(#discriminants => Ok(#name::#discriminants),)*
-            _ => Err(value),
+            #(#discriminants => core::result::Result::Ok(#name::#discriminants),)*
+            _ => core::result::Result::Err(value),
         }
     })
 }
@@ -236,7 +236,8 @@ fn derive_tryinto_or_error(input: DeriveInput) -> Result<proc_macro2::TokenStrea
         impl core::convert::TryFrom<#repr> for #name {
             type Error = #repr;

-            fn try_from(value: #repr) -> Result<Self, Self::Error> {
+            #[allow(ambiguous_associated_items)]
+            fn try_from(value: #repr) -> Result<Self, #repr> {
                 #body
             }
         }

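The fully qualified core::result::Result paths and the new
#[allow(ambiguous_associated_items)] matter when the enum being derived reuses
names from the prelude or from the trait itself. A hypothetical input (not
taken from this series) that the more defensive expansion is meant to survive:

    // With a variant literally named `Error`, `Self::Error` in the generated
    // impl would be ambiguous between the TryFrom associated type and the
    // variant; a variant named `Ok` would likewise shadow the prelude's `Ok`
    // in the generated match arms.
    #[derive(qemu_api_macros::TryInto)]
    #[repr(u8)]
    enum Status {
        Ok = 0,
        Error = 1,
    }
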
@@ -15,7 +15,9 @@ rust-version.workspace = true

 [dependencies]
 qemu_api_macros = { path = "../qemu-api-macros" }
+anyhow = "~1.0"
 libc = "0.2.162"
+foreign = "~0.3.1"

 [features]
 default = ["debug_cell"]

@@ -19,6 +19,7 @@ _qemu_api_rs = static_library(
     'src/cell.rs',
     'src/chardev.rs',
     'src/errno.rs',
+    'src/error.rs',
     'src/irq.rs',
     'src/memory.rs',
     'src/module.rs',
@@ -35,7 +36,7 @@ _qemu_api_rs = static_library(
   override_options: ['rust_std=2021', 'build.rust_std=2021'],
   rust_abi: 'rust',
   rust_args: _qemu_api_cfg,
-  dependencies: [libc_rs, qemu_api_macros, qemuutil_rs,
+  dependencies: [anyhow_rs, foreign_rs, libc_rs, qemu_api_macros, qemuutil_rs,
                  qom, hwcore, chardev, migration],
 )

rust/qemu-api/src/error.rs (new file)
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Error propagation for QEMU Rust code
+//!
+//! This module contains [`Error`], the bridge between Rust errors and
+//! [`Result`](std::result::Result)s and QEMU's C [`Error`](bindings::Error)
+//! struct.
+//!
+//! For FFI code, [`Error`] provides functions to simplify conversion between
+//! the Rust ([`Result<>`](std::result::Result)) and C (`Error**`) conventions:
+//!
+//! * [`ok_or_propagate`](crate::Error::ok_or_propagate),
+//!   [`bool_or_propagate`](crate::Error::bool_or_propagate),
+//!   [`ptr_or_propagate`](crate::Error::ptr_or_propagate) can be used to build
+//!   a C return value while also propagating an error condition
+//!
+//! * [`err_or_else`](crate::Error::err_or_else) and
+//!   [`err_or_unit`](crate::Error::err_or_unit) can be used to build a `Result`
+//!
+//! This module is most commonly used at the boundary between C and Rust code;
+//! other code will usually access it through the
+//! [`qemu_api::Result`](crate::Result) type alias, and will use the
+//! [`std::error::Error`] interface to let C errors participate in Rust's error
+//! handling functionality.
+//!
+//! Rust code can also create use this module to create an error object that
+//! will be passed up to C code, though in most cases this will be done
+//! transparently through the `?` operator. Errors can be constructed from a
+//! simple error string, from an [`anyhow::Error`] to pass any other Rust error
+//! type up to C code, or from a combination of the two.
+//!
+//! The third case, corresponding to [`Error::with_error`], is the only one that
+//! requires mentioning [`qemu_api::Error`](crate::Error) explicitly. Similar
+//! to how QEMU's C code handles errno values, the string and the
+//! `anyhow::Error` object will be concatenated with `:` as the separator.
+
+use std::{
+    borrow::Cow,
+    ffi::{c_char, c_int, c_void, CStr},
+    fmt::{self, Display},
+    panic, ptr,
+};
+
+use foreign::{prelude::*, OwnedPointer};
+
+use crate::bindings;
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+#[derive(Debug)]
+pub struct Error {
+    msg: Option<Cow<'static, str>>,
+    /// Appends the print string of the error to the msg if not None
+    cause: Option<anyhow::Error>,
+    file: &'static str,
+    line: u32,
+}
+
+impl std::error::Error for Error {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        self.cause.as_ref().map(AsRef::as_ref)
+    }
+
+    #[allow(deprecated)]
+    fn description(&self) -> &str {
+        self.msg
+            .as_deref()
+            .or_else(|| self.cause.as_deref().map(std::error::Error::description))
+            .expect("no message nor cause?")
+    }
+}
+
+impl Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut prefix = "";
+        if let Some(ref msg) = self.msg {
+            write!(f, "{msg}")?;
+            prefix = ": ";
+        }
+        if let Some(ref cause) = self.cause {
+            write!(f, "{prefix}{cause}")?;
+        } else if prefix.is_empty() {
+            panic!("no message nor cause?");
+        }
+        Ok(())
+    }
+}
+
+impl From<String> for Error {
+    #[track_caller]
+    fn from(msg: String) -> Self {
+        let location = panic::Location::caller();
+        Error {
+            msg: Some(Cow::Owned(msg)),
+            cause: None,
+            file: location.file(),
+            line: location.line(),
+        }
+    }
+}
+
+impl From<&'static str> for Error {
+    #[track_caller]
+    fn from(msg: &'static str) -> Self {
+        let location = panic::Location::caller();
+        Error {
+            msg: Some(Cow::Borrowed(msg)),
+            cause: None,
+            file: location.file(),
+            line: location.line(),
+        }
+    }
+}
+
+impl From<anyhow::Error> for Error {
+    #[track_caller]
+    fn from(error: anyhow::Error) -> Self {
+        let location = panic::Location::caller();
+        Error {
+            msg: None,
+            cause: Some(error),
+            file: location.file(),
+            line: location.line(),
+        }
+    }
+}
+
+impl Error {
+    /// Create a new error, prepending `msg` to the
+    /// description of `cause`
+    #[track_caller]
+    pub fn with_error(msg: impl Into<Cow<'static, str>>, cause: impl Into<anyhow::Error>) -> Self {
+        let location = panic::Location::caller();
+        Error {
+            msg: Some(msg.into()),
+            cause: Some(cause.into()),
+            file: location.file(),
+            line: location.line(),
+        }
+    }
+
+    /// Consume a result, returning `false` if it is an error and
+    /// `true` if it is successful. The error is propagated into
+    /// `errp` like the C API `error_propagate` would do.
+    ///
+    /// # Safety
+    ///
+    /// `errp` must be a valid argument to `error_propagate`;
+    /// typically it is received from C code and need not be
+    /// checked further at the Rust↔C boundary.
+    pub unsafe fn bool_or_propagate(result: Result<()>, errp: *mut *mut bindings::Error) -> bool {
+        // SAFETY: caller guarantees errp is valid
+        unsafe { Self::ok_or_propagate(result, errp) }.is_some()
+    }
+
+    /// Consume a result, returning a `NULL` pointer if it is an error and
+    /// a C representation of the contents if it is successful. This is
+    /// similar to the C API `error_propagate`, but it panics if `*errp`
+    /// is not `NULL`.
+    ///
+    /// # Safety
+    ///
+    /// `errp` must be a valid argument to `error_propagate`;
+    /// typically it is received from C code and need not be
+    /// checked further at the Rust↔C boundary.
+    ///
+    /// See [`propagate`](Error::propagate) for more information.
+    #[must_use]
+    pub unsafe fn ptr_or_propagate<T: CloneToForeign>(
+        result: Result<T>,
+        errp: *mut *mut bindings::Error,
+    ) -> *mut T::Foreign {
+        // SAFETY: caller guarantees errp is valid
+        unsafe { Self::ok_or_propagate(result, errp) }.clone_to_foreign_ptr()
+    }
+
+    /// Consume a result in the same way as `self.ok()`, but also propagate
+    /// a possible error into `errp`. This is similar to the C API
+    /// `error_propagate`, but it panics if `*errp` is not `NULL`.
+    ///
+    /// # Safety
+    ///
+    /// `errp` must be a valid argument to `error_propagate`;
+    /// typically it is received from C code and need not be
+    /// checked further at the Rust↔C boundary.
+    ///
+    /// See [`propagate`](Error::propagate) for more information.
+    pub unsafe fn ok_or_propagate<T>(
+        result: Result<T>,
+        errp: *mut *mut bindings::Error,
+    ) -> Option<T> {
+        result.map_err(|err| unsafe { err.propagate(errp) }).ok()
+    }
+
+    /// Equivalent of the C function `error_propagate`. Fill `*errp`
+    /// with the information container in `self` if `errp` is not NULL;
+    /// then consume it.
+    ///
+    /// This is similar to the C API `error_propagate`, but it panics if
+    /// `*errp` is not `NULL`.
+    ///
+    /// # Safety
+    ///
+    /// `errp` must be a valid argument to `error_propagate`; it can be
+    /// `NULL` or it can point to any of:
+    /// * `error_abort`
+    /// * `error_fatal`
+    /// * a local variable of (C) type `Error *`
+    ///
+    /// Typically `errp` is received from C code and need not be
+    /// checked further at the Rust↔C boundary.
+    pub unsafe fn propagate(self, errp: *mut *mut bindings::Error) {
+        if errp.is_null() {
+            return;
+        }
+
+        // SAFETY: caller guarantees that errp and *errp are valid
+        unsafe {
+            assert_eq!(*errp, ptr::null_mut());
+            bindings::error_propagate(errp, self.clone_to_foreign_ptr());
+        }
+    }
+
+    /// Convert a C `Error*` into a Rust `Result`, using
+    /// `Ok(())` if `c_error` is NULL. Free the `Error*`.
+    ///
+    /// # Safety
+    ///
+    /// `c_error` must be `NULL` or valid; typically it was initialized
+    /// with `ptr::null_mut()` and passed by reference to a C function.
+    pub unsafe fn err_or_unit(c_error: *mut bindings::Error) -> Result<()> {
+        // SAFETY: caller guarantees c_error is valid
+        unsafe { Self::err_or_else(c_error, || ()) }
+    }
+
+    /// Convert a C `Error*` into a Rust `Result`, calling `f()` to
+    /// obtain an `Ok` value if `c_error` is NULL. Free the `Error*`.
+    ///
+    /// # Safety
+    ///
+    /// `c_error` must be `NULL` or point to a valid C [`struct
+    /// Error`](bindings::Error); typically it was initialized with
+    /// `ptr::null_mut()` and passed by reference to a C function.
+    pub unsafe fn err_or_else<T, F: FnOnce() -> T>(
|
||||||
|
c_error: *mut bindings::Error,
|
||||||
|
f: F,
|
||||||
|
) -> Result<T> {
|
||||||
|
// SAFETY: caller guarantees c_error is valid
|
||||||
|
let err = unsafe { Option::<Self>::from_foreign(c_error) };
|
||||||
|
match err {
|
||||||
|
None => Ok(f()),
|
||||||
|
Some(err) => Err(err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FreeForeign for Error {
|
||||||
|
type Foreign = bindings::Error;
|
||||||
|
|
||||||
|
unsafe fn free_foreign(p: *mut bindings::Error) {
|
||||||
|
// SAFETY: caller guarantees p is valid
|
||||||
|
unsafe {
|
||||||
|
bindings::error_free(p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CloneToForeign for Error {
|
||||||
|
fn clone_to_foreign(&self) -> OwnedPointer<Self> {
|
||||||
|
// SAFETY: all arguments are controlled by this function
|
||||||
|
unsafe {
|
||||||
|
let err: *mut c_void = libc::malloc(std::mem::size_of::<bindings::Error>());
|
||||||
|
let err: &mut bindings::Error = &mut *err.cast();
|
||||||
|
*err = bindings::Error {
|
||||||
|
msg: format!("{self}").clone_to_foreign_ptr(),
|
||||||
|
err_class: bindings::ERROR_CLASS_GENERIC_ERROR,
|
||||||
|
src_len: self.file.len() as c_int,
|
||||||
|
src: self.file.as_ptr().cast::<c_char>(),
|
||||||
|
line: self.line as c_int,
|
||||||
|
func: ptr::null_mut(),
|
||||||
|
hint: ptr::null_mut(),
|
||||||
|
};
|
||||||
|
OwnedPointer::new(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromForeign for Error {
|
||||||
|
unsafe fn cloned_from_foreign(c_error: *const bindings::Error) -> Self {
|
||||||
|
// SAFETY: caller guarantees c_error is valid
|
||||||
|
unsafe {
|
||||||
|
let error = &*c_error;
|
||||||
|
let file = if error.src_len < 0 {
|
||||||
|
// NUL-terminated
|
||||||
|
CStr::from_ptr(error.src).to_str()
|
||||||
|
} else {
|
||||||
|
// Can become str::from_utf8 with Rust 1.87.0
|
||||||
|
std::str::from_utf8(std::slice::from_raw_parts(
|
||||||
|
&*error.src.cast::<u8>(),
|
||||||
|
error.src_len as usize,
|
||||||
|
))
|
||||||
|
};
|
||||||
|
|
||||||
|
Error {
|
||||||
|
msg: FromForeign::cloned_from_foreign(error.msg),
|
||||||
|
cause: None,
|
||||||
|
file: file.unwrap(),
|
||||||
|
line: error.line as u32,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::ffi::CStr;
|
||||||
|
|
||||||
|
use anyhow::anyhow;
|
||||||
|
use foreign::OwnedPointer;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
use crate::{assert_match, bindings};
|
||||||
|
|
||||||
|
#[track_caller]
|
||||||
|
fn error_for_test(msg: &CStr) -> OwnedPointer<Error> {
|
||||||
|
// SAFETY: all arguments are controlled by this function
|
||||||
|
let location = panic::Location::caller();
|
||||||
|
unsafe {
|
||||||
|
let err: *mut c_void = libc::malloc(std::mem::size_of::<bindings::Error>());
|
||||||
|
let err: &mut bindings::Error = &mut *err.cast();
|
||||||
|
*err = bindings::Error {
|
||||||
|
msg: msg.clone_to_foreign_ptr(),
|
||||||
|
err_class: bindings::ERROR_CLASS_GENERIC_ERROR,
|
||||||
|
src_len: location.file().len() as c_int,
|
||||||
|
src: location.file().as_ptr().cast::<c_char>(),
|
||||||
|
line: location.line() as c_int,
|
||||||
|
func: ptr::null_mut(),
|
||||||
|
hint: ptr::null_mut(),
|
||||||
|
};
|
||||||
|
OwnedPointer::new(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
unsafe fn error_get_pretty<'a>(local_err: *mut bindings::Error) -> &'a CStr {
|
||||||
|
unsafe { CStr::from_ptr(bindings::error_get_pretty(local_err)) }
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[allow(deprecated)]
|
||||||
|
fn test_description() {
|
||||||
|
use std::error::Error;
|
||||||
|
|
||||||
|
assert_eq!(super::Error::from("msg").description(), "msg");
|
||||||
|
assert_eq!(super::Error::from("msg".to_owned()).description(), "msg");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_display() {
|
||||||
|
assert_eq!(&*format!("{}", Error::from("msg")), "msg");
|
||||||
|
assert_eq!(&*format!("{}", Error::from("msg".to_owned())), "msg");
|
||||||
|
assert_eq!(&*format!("{}", Error::from(anyhow!("msg"))), "msg");
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
&*format!("{}", Error::with_error("msg", anyhow!("cause"))),
|
||||||
|
"msg: cause"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_bool_or_propagate() {
|
||||||
|
unsafe {
|
||||||
|
let mut local_err: *mut bindings::Error = ptr::null_mut();
|
||||||
|
|
||||||
|
assert!(Error::bool_or_propagate(Ok(()), &mut local_err));
|
||||||
|
assert_eq!(local_err, ptr::null_mut());
|
||||||
|
|
||||||
|
let my_err = Error::from("msg");
|
||||||
|
assert!(!Error::bool_or_propagate(Err(my_err), &mut local_err));
|
||||||
|
assert_ne!(local_err, ptr::null_mut());
|
||||||
|
assert_eq!(error_get_pretty(local_err), c"msg");
|
||||||
|
bindings::error_free(local_err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_ptr_or_propagate() {
|
||||||
|
unsafe {
|
||||||
|
let mut local_err: *mut bindings::Error = ptr::null_mut();
|
||||||
|
|
||||||
|
let ret = Error::ptr_or_propagate(Ok("abc".to_owned()), &mut local_err);
|
||||||
|
assert_eq!(String::from_foreign(ret), "abc");
|
||||||
|
assert_eq!(local_err, ptr::null_mut());
|
||||||
|
|
||||||
|
let my_err = Error::from("msg");
|
||||||
|
assert_eq!(
|
||||||
|
Error::ptr_or_propagate(Err::<String, _>(my_err), &mut local_err),
|
||||||
|
ptr::null_mut()
|
||||||
|
);
|
||||||
|
assert_ne!(local_err, ptr::null_mut());
|
||||||
|
assert_eq!(error_get_pretty(local_err), c"msg");
|
||||||
|
bindings::error_free(local_err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_err_or_unit() {
|
||||||
|
unsafe {
|
||||||
|
let result = Error::err_or_unit(ptr::null_mut());
|
||||||
|
assert_match!(result, Ok(()));
|
||||||
|
|
||||||
|
let err = error_for_test(c"msg");
|
||||||
|
let err = Error::err_or_unit(err.into_inner()).unwrap_err();
|
||||||
|
assert_eq!(&*format!("{err}"), "msg");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
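As a rough usage sketch (not part of this commit; `check_num_timers`, `apply_config` and the limit of 32 below are made up for illustration), Rust code returns the new `Result` type and a C-facing wrapper propagates failures into an `Error **errp`, mirroring the C `error_propagate()` convention:

    // Sketch only; the names above are hypothetical, the API is the one shown in error.rs.
    use qemu_api::{bindings, Error, Result};

    fn check_num_timers(num_timers: u8) -> Result<()> {
        if num_timers > 32 {
            // From<&'static str> records the caller's file/line via #[track_caller].
            return Err(Error::from("too many timers"));
        }
        Ok(())
    }

    // The C-facing wrapper consumes the Result and fills *errp on failure.
    unsafe extern "C" fn apply_config(num_timers: u8, errp: *mut *mut bindings::Error) -> bool {
        // SAFETY: errp is assumed to be a valid argument to error_propagate().
        unsafe { Error::bool_or_propagate(check_num_timers(num_timers), errp) }
    }
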
@@ -19,6 +19,7 @@ pub mod callbacks;
 pub mod cell;
 pub mod chardev;
 pub mod errno;
+pub mod error;
 pub mod irq;
 pub mod memory;
 pub mod module;
@@ -34,6 +35,8 @@ use std::{
     ffi::c_void,
 };
 
+pub use error::{Error, Result};
+
 #[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)]
 extern "C" {
     fn g_aligned_alloc0(
@@ -12,10 +12,11 @@ use std::{
 pub use bindings::{ClockEvent, DeviceClass, Property, ResetType};
 
 use crate::{
-    bindings::{self, qdev_init_gpio_in, qdev_init_gpio_out, Error, ResettableClass},
+    bindings::{self, qdev_init_gpio_in, qdev_init_gpio_out, ResettableClass},
     callbacks::FnCall,
     cell::{bql_locked, Opaque},
     chardev::Chardev,
+    error::{Error, Result},
     irq::InterruptSource,
     prelude::*,
     qom::{ObjectClass, ObjectImpl, Owned},
@@ -108,7 +109,7 @@ pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl + IsA<DeviceState> {
     ///
     /// If not `None`, the parent class's `realize` method is overridden
     /// with the function pointed to by `REALIZE`.
-    const REALIZE: Option<fn(&Self)> = None;
+    const REALIZE: Option<fn(&Self) -> Result<()>> = None;
 
     /// An array providing the properties that the user can set on the
     /// device. Not a `const` because referencing statics in constants
@@ -134,10 +135,13 @@ pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl + IsA<DeviceState> {
 /// readable/writeable from one thread at any time.
 unsafe extern "C" fn rust_realize_fn<T: DeviceImpl>(
     dev: *mut bindings::DeviceState,
-    _errp: *mut *mut Error,
+    errp: *mut *mut bindings::Error,
 ) {
     let state = NonNull::new(dev).unwrap().cast::<T>();
-    T::REALIZE.unwrap()(unsafe { state.as_ref() });
+    let result = T::REALIZE.unwrap()(unsafe { state.as_ref() });
+    unsafe {
+        Error::ok_or_propagate(result, errp);
+    }
 }
 
 unsafe impl InterfaceType for ResettableClass {
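A minimal sketch of how a device could use the now-fallible `REALIZE` hook (the `MyDevice` type, its `num_timers` field and `MAX_TIMERS` are illustrative assumptions; only the constant's signature comes from the trait above, and the remaining `DeviceImpl` items are elided):

    // Sketch under the assumptions stated above; not code from this commit.
    impl MyDevice {
        fn realize(&self) -> qemu_api::Result<()> {
            // Report bad configuration instead of asserting at runtime.
            if !(1..=MAX_TIMERS).contains(&self.num_timers) {
                return Err(format!("unsupported number of timers: {}", self.num_timers).into());
            }
            Ok(())
        }
    }

    impl DeviceImpl for MyDevice {
        // Matches Option<fn(&Self) -> Result<()>> from the hunk above.
        const REALIZE: Option<fn(&Self) -> qemu_api::Result<()>> = Some(MyDevice::realize);
        // ...other required DeviceImpl items (properties, vmstate, ...) elided.
    }
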
@@ -60,6 +60,7 @@ typedef enum memory_order {
 #include "hw/qdev-properties-system.h"
 #include "hw/irq.h"
 #include "qapi/error.h"
+#include "qapi/error-internal.h"
 #include "migration/vmstate.h"
 #include "chardev/char-serial.h"
 #include "exec/memattrs.h"
@@ -27,8 +27,9 @@ sub_file="${sub_tdir}/submodule.tar"
 # in their checkout, because the build environment is completely
 # different to the host OS.
 subprojects="keycodemapdb libvfio-user berkeley-softfloat-3
-  berkeley-testfloat-3 arbitrary-int-1-rs bilge-0.2-rs
-  bilge-impl-0.2-rs either-1-rs itertools-0.11-rs libc-0.2-rs proc-macro2-1-rs
+  berkeley-testfloat-3 anyhow-1-rs arbitrary-int-1-rs bilge-0.2-rs
+  bilge-impl-0.2-rs either-1-rs foreign-0.3-rs itertools-0.11-rs
+  libc-0.2-rs proc-macro2-1-rs
   proc-macro-error-1-rs proc-macro-error-attr-1-rs quote-1-rs
   syn-2-rs unicode-ident-1-rs"
 sub_deinit=""
@@ -40,8 +40,9 @@ fi
 
 # Only include wraps that are invoked with subproject()
 SUBPROJECTS="libvfio-user keycodemapdb berkeley-softfloat-3
-  berkeley-testfloat-3 arbitrary-int-1-rs bilge-0.2-rs
-  bilge-impl-0.2-rs either-1-rs itertools-0.11-rs libc-0.2-rs proc-macro2-1-rs
+  berkeley-testfloat-3 anyhow-1-rs arbitrary-int-1-rs bilge-0.2-rs
+  bilge-impl-0.2-rs either-1-rs foreign-0.3-rs itertools-0.11-rs
+  libc-0.2-rs proc-macro2-1-rs
   proc-macro-error-1-rs proc-macro-error-attr-1-rs quote-1-rs
   syn-2-rs unicode-ident-1-rs"
 
subprojects/.gitignore (vendored, 2 changed lines)
@@ -6,10 +6,12 @@
 /keycodemapdb
 /libvfio-user
 /slirp
+/anyhow-1.0.98
 /arbitrary-int-1.2.7
 /bilge-0.2.0
 /bilge-impl-0.2.0
 /either-1.12.0
+/foreign-0.3.1
 /itertools-0.11.0
 /libc-0.2.162
 /proc-macro-error-1.0.4
subprojects/anyhow-1-rs.wrap (new file, 7 lines)
@@ -0,0 +1,7 @@
[wrap-file]
directory = anyhow-1.0.98
source_url = https://crates.io/api/v1/crates/anyhow/1.0.98/download
source_filename = anyhow-1.0.98.tar.gz
source_hash = e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487
#method = cargo
patch_directory = anyhow-1-rs
subprojects/foreign-0.3-rs.wrap (new file, 7 lines)
@@ -0,0 +1,7 @@
[wrap-file]
directory = foreign-0.3.1
source_url = https://crates.io/api/v1/crates/foreign/0.3.1/download
source_filename = foreign-0.3.1.tar.gz
source_hash = 17ca1b5be8c9d320daf386f1809c7acc0cb09accbae795c2001953fa50585846
#method = cargo
patch_directory = foreign-0.3-rs
subprojects/packagefiles/anyhow-1-rs/meson.build (new file, 33 lines)
@@ -0,0 +1,33 @@
project('anyhow-1-rs', 'rust',
  meson_version: '>=1.5.0',
  version: '1.0.98',
  license: 'MIT OR Apache-2.0',
  default_options: [])

rustc = meson.get_compiler('rust')

rust_args = ['--cap-lints', 'allow']
rust_args += ['--cfg', 'feature="std"']
if rustc.version().version_compare('<1.65.0')
  error('rustc version ' + rustc.version() + ' is unsupported. Please upgrade to at least 1.65.0')
endif
rust_args += [ '--cfg', 'std_backtrace' ] # >= 1.65.0
if rustc.version().version_compare('<1.81.0')
  rust_args += [ '--cfg', 'anyhow_no_core_error' ]
endif

_anyhow_rs = static_library(
  'anyhow',
  files('src/lib.rs'),
  gnu_symbol_visibility: 'hidden',
  override_options: ['rust_std=2018', 'build.rust_std=2018'],
  rust_abi: 'rust',
  rust_args: rust_args,
  dependencies: [],
)

anyhow_dep = declare_dependency(
  link_with: _anyhow_rs,
)

meson.override_dependency('anyhow-1-rs', anyhow_dep)
subprojects/packagefiles/foreign-0.3-rs/meson.build (new file, 26 lines)
@@ -0,0 +1,26 @@
project('foreign-0.3-rs', 'rust',
  meson_version: '>=1.5.0',
  version: '0.2.0',
  license: 'MIT OR Apache-2.0',
  default_options: [])

subproject('libc-0.2-rs', required: true)
libc_rs = dependency('libc-0.2-rs')

_foreign_rs = static_library(
  'foreign',
  files('src/lib.rs'),
  gnu_symbol_visibility: 'hidden',
  override_options: ['rust_std=2021', 'build.rust_std=2021'],
  rust_abi: 'rust',
  rust_args: [
    '--cap-lints', 'allow',
  ],
  dependencies: [libc_rs],
)

foreign_dep = declare_dependency(
  link_with: _foreign_rs,
)

meson.override_dependency('foreign-0.3-rs', foreign_dep)
@@ -6018,9 +6018,11 @@ static bool host_supports_vmx(void)
  * because private/shared page tracking is already provided through other
  * means, these 2 use-cases should be treated as being mutually-exclusive.
  */
-static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
+static int kvm_handle_hc_map_gpa_range(X86CPU *cpu, struct kvm_run *run)
 {
+    struct kvm_pre_fault_memory mem;
     uint64_t gpa, size, attributes;
+    int ret;
 
     if (!machine_require_guest_memfd(current_machine))
         return -EINVAL;
@@ -6031,13 +6033,32 @@ static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
 
     trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags);
 
-    return kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
+    ret = kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
+    if (ret || !kvm_pre_fault_memory_supported) {
+        return ret;
+    }
+
+    /*
+     * Opportunistically pre-fault memory in. Failures are ignored so that any
+     * errors in faulting in the memory will get captured in KVM page fault
+     * path when the guest first accesses the page.
+     */
+    memset(&mem, 0, sizeof(mem));
+    mem.gpa = gpa;
+    mem.size = size;
+    while (mem.size) {
+        if (kvm_vcpu_ioctl(CPU(cpu), KVM_PRE_FAULT_MEMORY, &mem)) {
+            break;
+        }
+    }
+
+    return 0;
 }
 
-static int kvm_handle_hypercall(struct kvm_run *run)
+static int kvm_handle_hypercall(X86CPU *cpu, struct kvm_run *run)
 {
     if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)
-        return kvm_handle_hc_map_gpa_range(run);
+        return kvm_handle_hc_map_gpa_range(cpu, run);
 
     return -EINVAL;
 }
@@ -6137,7 +6158,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         break;
 #endif
     case KVM_EXIT_HYPERCALL:
-        ret = kvm_handle_hypercall(run);
+        ret = kvm_handle_hypercall(cpu, run);
         break;
     case KVM_EXIT_SYSTEM_EVENT:
         switch (run->system_event.type) {
@@ -842,7 +842,7 @@ int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
 
 void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
 {
-    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+    int old_flags = get_float_exception_flags(&env->sse_status);
     int i;
     for (i = 0; i < 2 << SHIFT; i++) {
         d->ZMM_S(i) = float32_div(float32_one,
@@ -855,7 +855,7 @@ void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
 #if SHIFT == 1
 void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
 {
-    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+    int old_flags = get_float_exception_flags(&env->sse_status);
     int i;
     d->ZMM_S(0) = float32_div(float32_one,
                               float32_sqrt(s->ZMM_S(0), &env->sse_status),
@@ -869,7 +869,7 @@ void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
 
 void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
 {
-    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+    int old_flags = get_float_exception_flags(&env->sse_status);
     int i;
     for (i = 0; i < 2 << SHIFT; i++) {
         d->ZMM_S(i) = float32_div(float32_one, s->ZMM_S(i), &env->sse_status);
@@ -880,7 +880,7 @@ void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
 #if SHIFT == 1
 void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
 {
-    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+    int old_flags = get_float_exception_flags(&env->sse_status);
     int i;
     d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
     for (i = 1; i < 2 << SHIFT; i++) {
@@ -1714,7 +1714,7 @@ void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
 void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                   uint32_t mode)
 {
-    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+    int old_flags = get_float_exception_flags(&env->sse_status);
     signed char prev_rounding_mode;
     int i;
 
@@ -1738,7 +1738,7 @@ void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
 void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                   uint32_t mode)
 {
-    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+    int old_flags = get_float_exception_flags(&env->sse_status);
     signed char prev_rounding_mode;
     int i;
 
@@ -1763,7 +1763,7 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
 void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
                                   uint32_t mode)
 {
-    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+    int old_flags = get_float_exception_flags(&env->sse_status);
     signed char prev_rounding_mode;
     int i;
 
@@ -1788,7 +1788,7 @@ void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
 void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
                                   uint32_t mode)
 {
-    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+    int old_flags = get_float_exception_flags(&env->sse_status);
     signed char prev_rounding_mode;
     int i;
 
@@ -189,25 +189,25 @@ void cpu_init_fp_statuses(CPUX86State *env)
     set_float_default_nan_pattern(0b11000000, &env->mmx_status);
     set_float_default_nan_pattern(0b11000000, &env->sse_status);
     /*
-     * TODO: x86 does flush-to-zero detection after rounding (the SDM
+     * x86 does flush-to-zero detection after rounding (the SDM
      * section 10.2.3.3 on the FTZ bit of MXCSR says that we flush
      * when we detect underflow, which x86 does after rounding).
      */
-    set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status);
-    set_float_ftz_detection(float_ftz_before_rounding, &env->mmx_status);
-    set_float_ftz_detection(float_ftz_before_rounding, &env->sse_status);
+    set_float_ftz_detection(float_ftz_after_rounding, &env->fp_status);
+    set_float_ftz_detection(float_ftz_after_rounding, &env->mmx_status);
+    set_float_ftz_detection(float_ftz_after_rounding, &env->sse_status);
 }
 
-static inline uint8_t save_exception_flags(CPUX86State *env)
+static inline int save_exception_flags(CPUX86State *env)
 {
-    uint8_t old_flags = get_float_exception_flags(&env->fp_status);
+    int old_flags = get_float_exception_flags(&env->fp_status);
     set_float_exception_flags(0, &env->fp_status);
     return old_flags;
 }
 
-static void merge_exception_flags(CPUX86State *env, uint8_t old_flags)
+static void merge_exception_flags(CPUX86State *env, int old_flags)
 {
-    uint8_t new_flags = get_float_exception_flags(&env->fp_status);
+    int new_flags = get_float_exception_flags(&env->fp_status);
     float_raise(old_flags, &env->fp_status);
     fpu_set_exception(env,
                       ((new_flags & float_flag_invalid ? FPUS_IE : 0) |
@@ -215,12 +215,12 @@ static void merge_exception_flags(CPUX86State *env, uint8_t old_flags)
                       (new_flags & float_flag_overflow ? FPUS_OE : 0) |
                       (new_flags & float_flag_underflow ? FPUS_UE : 0) |
                       (new_flags & float_flag_inexact ? FPUS_PE : 0) |
-                      (new_flags & float_flag_input_denormal_flushed ? FPUS_DE : 0)));
+                      (new_flags & float_flag_input_denormal_used ? FPUS_DE : 0)));
 }
 
 static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     floatx80 ret = floatx80_div(a, b, &env->fp_status);
     merge_exception_flags(env, old_flags);
     return ret;
@@ -240,7 +240,7 @@ static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
 
 void helper_flds_FT0(CPUX86State *env, uint32_t val)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     union {
         float32 f;
         uint32_t i;
@@ -253,7 +253,7 @@ void helper_flds_FT0(CPUX86State *env, uint32_t val)
 
 void helper_fldl_FT0(CPUX86State *env, uint64_t val)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     union {
         float64 f;
         uint64_t i;
@@ -271,7 +271,7 @@ void helper_fildl_FT0(CPUX86State *env, int32_t val)
 
 void helper_flds_ST0(CPUX86State *env, uint32_t val)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int new_fpstt;
     union {
         float32 f;
@@ -288,7 +288,7 @@ void helper_flds_ST0(CPUX86State *env, uint32_t val)
 
 void helper_fldl_ST0(CPUX86State *env, uint64_t val)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int new_fpstt;
     union {
         float64 f;
@@ -338,7 +338,7 @@ void helper_fildll_ST0(CPUX86State *env, int64_t val)
 
 uint32_t helper_fsts_ST0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     union {
         float32 f;
         uint32_t i;
@@ -351,7 +351,7 @@ uint32_t helper_fsts_ST0(CPUX86State *env)
 
 uint64_t helper_fstl_ST0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     union {
         float64 f;
         uint64_t i;
@@ -364,7 +364,7 @@ uint64_t helper_fstl_ST0(CPUX86State *env)
 
 int32_t helper_fist_ST0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int32_t val;
 
     val = floatx80_to_int32(ST0, &env->fp_status);
@@ -378,7 +378,7 @@ int32_t helper_fist_ST0(CPUX86State *env)
 
 int32_t helper_fistl_ST0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int32_t val;
 
     val = floatx80_to_int32(ST0, &env->fp_status);
@@ -391,7 +391,7 @@ int32_t helper_fistl_ST0(CPUX86State *env)
 
 int64_t helper_fistll_ST0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int64_t val;
 
     val = floatx80_to_int64(ST0, &env->fp_status);
@@ -404,7 +404,7 @@ int64_t helper_fistll_ST0(CPUX86State *env)
 
 int32_t helper_fistt_ST0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int32_t val;
 
     val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
@@ -418,7 +418,7 @@ int32_t helper_fistt_ST0(CPUX86State *env)
 
 int32_t helper_fisttl_ST0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int32_t val;
 
     val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
@@ -431,7 +431,7 @@ int32_t helper_fisttl_ST0(CPUX86State *env)
 
 int64_t helper_fisttll_ST0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int64_t val;
 
     val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
@@ -527,7 +527,7 @@ static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
 
 void helper_fcom_ST0_FT0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     FloatRelation ret;
 
     ret = floatx80_compare(ST0, FT0, &env->fp_status);
@@ -537,7 +537,7 @@ void helper_fcom_ST0_FT0(CPUX86State *env)
 
 void helper_fucom_ST0_FT0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     FloatRelation ret;
 
     ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
@@ -549,7 +549,7 @@ static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
 
 void helper_fcomi_ST0_FT0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int eflags;
     FloatRelation ret;
 
@@ -562,7 +562,7 @@ void helper_fcomi_ST0_FT0(CPUX86State *env)
 
 void helper_fucomi_ST0_FT0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int eflags;
     FloatRelation ret;
 
@@ -575,28 +575,28 @@ void helper_fucomi_ST0_FT0(CPUX86State *env)
 
 void helper_fadd_ST0_FT0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     ST0 = floatx80_add(ST0, FT0, &env->fp_status);
     merge_exception_flags(env, old_flags);
 }
 
 void helper_fmul_ST0_FT0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
     merge_exception_flags(env, old_flags);
 }
 
 void helper_fsub_ST0_FT0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
     merge_exception_flags(env, old_flags);
 }
 
 void helper_fsubr_ST0_FT0(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
     merge_exception_flags(env, old_flags);
 }
@@ -615,28 +615,28 @@ void helper_fdivr_ST0_FT0(CPUX86State *env)
 
 void helper_fadd_STN_ST0(CPUX86State *env, int st_index)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
     merge_exception_flags(env, old_flags);
 }
 
 void helper_fmul_STN_ST0(CPUX86State *env, int st_index)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
     merge_exception_flags(env, old_flags);
 }
 
 void helper_fsub_STN_ST0(CPUX86State *env, int st_index)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
     merge_exception_flags(env, old_flags);
 }
 
 void helper_fsubr_STN_ST0(CPUX86State *env, int st_index)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
     merge_exception_flags(env, old_flags);
 }
@@ -861,7 +861,7 @@ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr)
 
 void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     int v;
     target_ulong mem_ref, mem_end;
     int64_t val;
@@ -1136,7 +1136,7 @@ static const struct f2xm1_data f2xm1_table[65] = {
 
 void helper_f2xm1(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     uint64_t sig = extractFloatx80Frac(ST0);
     int32_t exp = extractFloatx80Exp(ST0);
     bool sign = extractFloatx80Sign(ST0);
@@ -1369,7 +1369,7 @@ static const struct fpatan_data fpatan_table[9] = {
 
 void helper_fpatan(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     uint64_t arg0_sig = extractFloatx80Frac(ST0);
     int32_t arg0_exp = extractFloatx80Exp(ST0);
     bool arg0_sign = extractFloatx80Sign(ST0);
@@ -1808,7 +1808,7 @@ void helper_fpatan(CPUX86State *env)
 
 void helper_fxtract(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     CPU_LDoubleU temp;
 
     temp.d = ST0;
@@ -1857,7 +1857,7 @@ void helper_fxtract(CPUX86State *env)
 
 static void helper_fprem_common(CPUX86State *env, bool mod)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     uint64_t quotient;
     CPU_LDoubleU temp0, temp1;
     int exp0, exp1, expdiff;
@@ -2053,7 +2053,7 @@ static void helper_fyl2x_common(CPUX86State *env, floatx80 arg, int32_t *exp,
 
 void helper_fyl2xp1(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     uint64_t arg0_sig = extractFloatx80Frac(ST0);
     int32_t arg0_exp = extractFloatx80Exp(ST0);
     bool arg0_sign = extractFloatx80Sign(ST0);
@@ -2151,7 +2151,7 @@ void helper_fyl2xp1(CPUX86State *env)
 
 void helper_fyl2x(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
    uint64_t arg0_sig = extractFloatx80Frac(ST0);
     int32_t arg0_exp = extractFloatx80Exp(ST0);
     bool arg0_sign = extractFloatx80Sign(ST0);
@@ -2298,7 +2298,7 @@ void helper_fyl2x(CPUX86State *env)
 
 void helper_fsqrt(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     if (floatx80_is_neg(ST0)) {
         env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
         env->fpus |= 0x400;
@@ -2324,14 +2324,14 @@ void helper_fsincos(CPUX86State *env)
 
 void helper_frndint(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     ST0 = floatx80_round_to_int(ST0, &env->fp_status);
     merge_exception_flags(env, old_flags);
 }
 
 void helper_fscale(CPUX86State *env)
 {
-    uint8_t old_flags = save_exception_flags(env);
+    int old_flags = save_exception_flags(env);
     if (floatx80_invalid_encoding(ST1, &env->fp_status) ||
         floatx80_invalid_encoding(ST0, &env->fp_status)) {
         float_raise(float_flag_invalid, &env->fp_status);
@@ -2369,7 +2369,7 @@ void helper_fscale(CPUX86State *env)
     } else {
         int n;
         FloatX80RoundPrec save = env->fp_status.floatx80_rounding_precision;
-        uint8_t save_flags = get_float_exception_flags(&env->fp_status);
+        int save_flags = get_float_exception_flags(&env->fp_status);
         set_float_exception_flags(0, &env->fp_status);
         n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
         set_float_exception_flags(save_flags, &env->fp_status);
@@ -3254,6 +3254,7 @@ void update_mxcsr_status(CPUX86State *env)
 
     /* Set exception flags.  */
     set_float_exception_flags((mxcsr & FPUS_IE ? float_flag_invalid : 0) |
+                              (mxcsr & FPUS_DE ? float_flag_input_denormal_used : 0) |
                               (mxcsr & FPUS_ZE ? float_flag_divbyzero : 0) |
                               (mxcsr & FPUS_OE ? float_flag_overflow : 0) |
                              (mxcsr & FPUS_UE ? float_flag_underflow : 0) |
@@ -3269,15 +3270,9 @@ void update_mxcsr_status(CPUX86State *env)
 
 void update_mxcsr_from_sse_status(CPUX86State *env)
 {
-    uint8_t flags = get_float_exception_flags(&env->sse_status);
-    /*
-     * The MXCSR denormal flag has opposite semantics to
-     * float_flag_input_denormal_flushed (the softfloat code sets that flag
-     * only when flushing input denormals to zero, but SSE sets it
-     * only when not flushing them to zero), so is not converted
-     * here.
-     */
+    int flags = get_float_exception_flags(&env->sse_status);
     env->mxcsr |= ((flags & float_flag_invalid ? FPUS_IE : 0) |
+                   (flags & float_flag_input_denormal_used ? FPUS_DE : 0) |
                    (flags & float_flag_divbyzero ? FPUS_ZE : 0) |
                    (flags & float_flag_overflow ? FPUS_OE : 0) |
                    (flags & float_flag_underflow ? FPUS_UE : 0) |
@@ -79,14 +79,21 @@ static testdata tests[] = {
     /*
      * Flushing of denormal outputs to zero should also happen after
      * rounding, so setting FTZ should not affect the result or the flags.
-     * QEMU currently does not emulate this correctly because we do the
-     * flush-to-zero check before rounding, so we incorrectly produce a
-     * zero result and set Underflow as well as Precision.
      */
-#ifdef ENABLE_FAILING_TESTS
     { 0x3fdfffffffffffff, 0x001fffffffffffff, 0x801fffffffffffff, true,
       0x8010000000000000, 0x20 }, /* Enabling FTZ shouldn't change flags */
-#endif
+    /*
+     * normal * 0 + a denormal. With FTZ disabled this gives an exact
+     * result (equal to the input denormal) that has consumed the denormal.
+     */
+    { 0x3cc8000000000000, 0x0000000000000000, 0x8008000000000000, false,
+      0x8008000000000000, 0x2 }, /* Denormal */
+    /*
+     * With FTZ enabled, this consumes the denormal, returns zero (because
+     * flushed) and indicates also Underflow and Precision.
+     */
+    { 0x3cc8000000000000, 0x0000000000000000, 0x8008000000000000, true,
+      0x8000000000000000, 0x32 }, /* Precision, Underflow, Denormal */
 };
 
 int main(void)
@@ -305,7 +305,9 @@ static void mcs_mutex_lock(void)
     prev = qatomic_xchg(&mutex_head, id);
     if (prev != -1) {
         qatomic_set(&nodes[prev].next, id);
-        qemu_futex_wait(&nodes[id].locked, 1);
+        while (qatomic_read(&nodes[id].locked) == 1) {
+            qemu_futex_wait(&nodes[id].locked, 1);
+        }
     }
 }
 
@@ -328,7 +330,7 @@ static void mcs_mutex_unlock(void)
     /* Wake up the next in line.  */
     next = qatomic_read(&nodes[id].next);
     nodes[next].locked = 0;
-    qemu_futex_wake(&nodes[next].locked, 1);
+    qemu_futex_wake_single(&nodes[next].locked);
 }
 
 static void test_multi_fair_mutex_entry(void *opaque)
util/error.c (20 changed lines)
@@ -15,15 +15,7 @@
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
-
-struct Error
-{
-    char *msg;
-    ErrorClass err_class;
-    const char *src, *func;
-    int line;
-    GString *hint;
-};
+#include "qapi/error-internal.h"
 
 Error *error_abort;
 Error *error_fatal;
@@ -32,8 +24,13 @@ Error *error_warn;
 static void error_handle(Error **errp, Error *err)
 {
     if (errp == &error_abort) {
-        fprintf(stderr, "Unexpected error in %s() at %s:%d:\n",
-                err->func, err->src, err->line);
+        if (err->func) {
+            fprintf(stderr, "Unexpected error in %s() at %.*s:%d:\n",
+                    err->func, err->src_len, err->src, err->line);
+        } else {
+            fprintf(stderr, "Unexpected error at %.*s:%d:\n",
+                    err->src_len, err->src, err->line);
+        }
         error_report("%s", error_get_pretty(err));
         if (err->hint) {
             error_printf("%s", err->hint->str);
@@ -75,6 +72,7 @@ static void error_setv(Error **errp,
         g_free(msg);
     }
     err->err_class = err_class;
+    err->src_len = -1;
     err->src = src;
     err->line = line;
     err->func = func;
171
util/event.c
Normal file
171
util/event.c
Normal file
|
@@ -0,0 +1,171 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "qemu/osdep.h"
#include "qemu/thread.h"

/*
 * Valid transitions:
 * - FREE -> SET (qemu_event_set)
 * - BUSY -> SET (qemu_event_set)
 * - SET -> FREE (qemu_event_reset)
 * - FREE -> BUSY (qemu_event_wait)
 *
 * With futex, the waking and blocking operations follow
 * BUSY -> SET and FREE -> BUSY, respectively.
 *
 * Without futex, BUSY -> SET and FREE -> BUSY never happen. Instead, the waking
 * operation follows FREE -> SET and the blocking operation will happen in
 * qemu_event_wait() if the event is not SET.
 *
 * SET->BUSY does not happen (it can be observed from the outside but
 * it really is SET->FREE->BUSY).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef HAVE_FUTEX
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef HAVE_FUTEX
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    assert(ev->initialized);

#ifdef HAVE_FUTEX
    /*
     * Pairs with both qemu_event_reset() and qemu_event_wait().
     *
     * qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (qatomic_read(&ev->value) != EV_SET) {
        int old = qatomic_xchg(&ev->value, EV_SET);

        /* Pairs with memory barrier in kernel futex_wait system call. */
        smp_mb__after_rmw();
        if (old == EV_BUSY) {
            /* There were waiters, wake them up. */
            qemu_futex_wake_all(ev);
        }
    }
#else
    pthread_mutex_lock(&ev->lock);
    /* Pairs with qemu_event_reset()'s load acquire. */
    qatomic_store_release(&ev->value, EV_SET);
    pthread_cond_broadcast(&ev->cond);
    pthread_mutex_unlock(&ev->lock);
#endif
}

void qemu_event_reset(QemuEvent *ev)
{
    assert(ev->initialized);

#ifdef HAVE_FUTEX
    /*
     * If there was a concurrent reset (or even reset+wait),
     * do nothing. Otherwise change EV_SET->EV_FREE.
     */
    qatomic_or(&ev->value, EV_FREE);

    /*
     * Order reset before checking the condition in the caller.
     * Pairs with the first memory barrier in qemu_event_set().
     */
    smp_mb__after_rmw();
#else
    /*
     * If futexes are not available, there are no EV_FREE->EV_BUSY
     * transitions because wakeups are done entirely through the
     * condition variable. Since qatomic_set() only writes EV_FREE,
     * the load seems useless but in reality, the acquire synchronizes
     * with qemu_event_set()'s store release: if qemu_event_reset()
     * sees EV_SET here, then the caller will certainly see a
     * successful condition and skip qemu_event_wait():
     *
     *     done = 1;                 if (done == 0)
     *     qemu_event_set() {          qemu_event_reset() {
     *       lock();
     *       ev->value = EV_SET ----->     load ev->value
     *                                     ev->value = old value | EV_FREE
     *       cond_broadcast()
     *       unlock();                 }
     *     }                           if (done == 0)
     *                                     // qemu_event_wait() not called
     */
    qatomic_set(&ev->value, qatomic_load_acquire(&ev->value) | EV_FREE);
#endif
}

void qemu_event_wait(QemuEvent *ev)
{
    assert(ev->initialized);

#ifdef HAVE_FUTEX
    while (true) {
        /*
         * qemu_event_wait must synchronize with qemu_event_set even if it does
         * not go down the slow path, so this load-acquire is needed that
         * synchronizes with the first memory barrier in qemu_event_set().
         */
        unsigned value = qatomic_load_acquire(&ev->value);
        if (value == EV_SET) {
            break;
        }

        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there are
             * waiters. No need to retry, because there cannot be a concurrent
             * busy->free transition. After the CAS, the event will be either
             * set or busy.
             *
             * This cmpxchg doesn't have particular ordering requirements if it
             * succeeds (moving the store earlier can only cause
             * qemu_event_set() to issue _more_ wakeups), the failing case needs
             * acquire semantics like the load above.
             */
            if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                break;
            }
        }

        /*
         * This is the final check for a concurrent set, so it does need
         * a smp_mb() pairing with the second barrier of qemu_event_set().
         * The barrier is inside the FUTEX_WAIT system call.
         */
        qemu_futex_wait(ev, EV_BUSY);
    }
#else
    pthread_mutex_lock(&ev->lock);
    while (qatomic_read(&ev->value) != EV_SET) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
#endif
}
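For orientation, here is a minimal usage sketch of the API above. It is not part of this commit, and the done/done_event names are hypothetical; it simply follows the test-reset-test-wait sequence that the reset/wait comments assume: reset first, re-check the condition, and only then block.

/* Minimal sketch (not from this commit): a waiter and a producer
 * coordinating through a QemuEvent plus an ordinary flag.
 */
#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"

static QemuEvent done_event;   /* hypothetical names, for illustration only */
static int done;

static void setup(void)
{
    qemu_event_init(&done_event, false);   /* start in the FREE (unset) state */
}

static void waiter(void)
{
    while (!qatomic_read(&done)) {
        /* Test-reset-test-wait: reset before re-checking the condition. */
        qemu_event_reset(&done_event);
        if (qatomic_read(&done)) {
            break;
        }
        qemu_event_wait(&done_event);
    }
}

static void producer(void)
{
    qatomic_set(&done, 1);
    qemu_event_set(&done_event);   /* wakes the waiter if it went to sleep */
}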
util/lockcnt.c
@@ -12,10 +12,11 @@
 #include "qemu/atomic.h"
 #include "trace.h"
 
-#ifdef CONFIG_LINUX
-#include "qemu/futex.h"
+#ifdef HAVE_FUTEX
 
-/* On Linux, bits 0-1 are a futex-based lock, bits 2-31 are the counter.
+/*
+ * When futex is available, bits 0-1 are a futex-based lock, bits 2-31 are the
+ * counter.
  * For the mutex algorithm see Ulrich Drepper's "Futexes Are Tricky" (ok,
  * this is not the most relaxing citation I could make...). It is similar
  * to mutex2 in the paper.
@@ -106,7 +107,7 @@ static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val,
 static void lockcnt_wake(QemuLockCnt *lockcnt)
 {
     trace_lockcnt_futex_wake(lockcnt);
-    qemu_futex_wake(&lockcnt->count, 1);
+    qemu_futex_wake_single(&lockcnt->count);
 }
 
 void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
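The switch from qemu_futex_wake(&lockcnt->count, 1) to qemu_futex_wake_single(&lockcnt->count) matches the single/all split used by util/event.c above (qemu_futex_wake_all). As a hedged sketch of what such wrappers can look like on Linux in terms of the raw futex(2) system call; the actual include/qemu/futex.h in this series may differ in detail:

/* Hedged sketch of Linux futex wrappers; names are made up to avoid
 * implying this is the real qemu/futex.h implementation.
 */
#include <limits.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static inline void futex_wake_single_sketch(void *addr)
{
    /* Wake at most one waiter blocked on *addr. */
    syscall(__NR_futex, addr, FUTEX_WAKE, 1, NULL, NULL, 0);
}

static inline void futex_wake_all_sketch(void *addr)
{
    /* Wake every waiter blocked on *addr. */
    syscall(__NR_futex, addr, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
}

static inline void futex_wait_sketch(void *addr, unsigned val)
{
    /* Block only while *addr still equals val; the check is atomic in the kernel. */
    syscall(__NR_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0);
}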
util/meson.build
@@ -27,7 +27,7 @@ else
   util_ss.add(files('event_notifier-win32.c'))
   util_ss.add(files('oslib-win32.c'))
   util_ss.add(files('qemu-thread-win32.c'))
-  util_ss.add(winmm, pathcch)
+  util_ss.add(winmm, pathcch, synchronization)
 endif
 util_ss.add(when: linux_io_uring, if_true: files('fdmon-io_uring.c'))
 if glib_has_gslice
@@ -35,6 +35,7 @@ if glib_has_gslice
 endif
 util_ss.add(files('defer-call.c'))
 util_ss.add(files('envlist.c', 'path.c', 'module.c'))
+util_ss.add(files('event.c'))
 util_ss.add(files('host-utils.c'))
 util_ss.add(files('bitmap.c', 'bitops.c'))
 util_ss.add(files('fifo8.c'))
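The added `synchronization` dependency is presumably what provides WaitOnAddress()/WakeByAddressSingle()/WakeByAddressAll(), the Win32 futex-like primitives that live in Synchronization.lib. A hedged sketch of a futex shim built on them, not necessarily how qemu/futex.h implements it:

/* Hedged sketch of futex-like wrappers on Windows using WaitOnAddress();
 * requires linking with Synchronization.lib, which the meson change above adds.
 */
#include <windows.h>

static inline void win_futex_wait_sketch(volatile void *addr, unsigned val)
{
    /* Blocks while *addr == val; spurious wakeups are possible, so callers
     * re-check their condition, just as qemu_event_wait() does. */
    WaitOnAddress(addr, &val, sizeof(val), INFINITE);
}

static inline void win_futex_wake_single_sketch(void *addr)
{
    WakeByAddressSingle(addr);
}

static inline void win_futex_wake_all_sketch(void *addr)
{
    WakeByAddressAll(addr);
}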
util/qemu-thread-posix.c
@@ -317,154 +317,6 @@ void qemu_sem_wait(QemuSemaphore *sem)
     qemu_mutex_unlock(&sem->mutex);
 }
 
-#ifdef __linux__
-#include "qemu/futex.h"
-#else
-static inline void qemu_futex_wake(QemuEvent *ev, int n)
-{
-    assert(ev->initialized);
-    pthread_mutex_lock(&ev->lock);
-    if (n == 1) {
-        pthread_cond_signal(&ev->cond);
-    } else {
-        pthread_cond_broadcast(&ev->cond);
-    }
-    pthread_mutex_unlock(&ev->lock);
-}
-
-static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
-{
-    assert(ev->initialized);
-    pthread_mutex_lock(&ev->lock);
-    if (ev->value == val) {
-        pthread_cond_wait(&ev->cond, &ev->lock);
-    }
-    pthread_mutex_unlock(&ev->lock);
-}
-#endif
-
-/* Valid transitions:
- * - free->set, when setting the event
- * - busy->set, when setting the event, followed by qemu_futex_wake
- * - set->free, when resetting the event
- * - free->busy, when waiting
- *
- * set->busy does not happen (it can be observed from the outside but
- * it really is set->free->busy).
- *
- * busy->free provably cannot happen; to enforce it, the set->free transition
- * is done with an OR, which becomes a no-op if the event has concurrently
- * transitioned to free or busy.
- */
-
-#define EV_SET         0
-#define EV_FREE        1
-#define EV_BUSY       -1
-
-void qemu_event_init(QemuEvent *ev, bool init)
-{
-#ifndef __linux__
-    pthread_mutex_init(&ev->lock, NULL);
-    pthread_cond_init(&ev->cond, NULL);
-#endif
-
-    ev->value = (init ? EV_SET : EV_FREE);
-    ev->initialized = true;
-}
-
-void qemu_event_destroy(QemuEvent *ev)
-{
-    assert(ev->initialized);
-    ev->initialized = false;
-#ifndef __linux__
-    pthread_mutex_destroy(&ev->lock);
-    pthread_cond_destroy(&ev->cond);
-#endif
-}
-
-void qemu_event_set(QemuEvent *ev)
-{
-    assert(ev->initialized);
-
-    /*
-     * Pairs with both qemu_event_reset() and qemu_event_wait().
-     *
-     * qemu_event_set has release semantics, but because it *loads*
-     * ev->value we need a full memory barrier here.
-     */
-    smp_mb();
-    if (qatomic_read(&ev->value) != EV_SET) {
-        int old = qatomic_xchg(&ev->value, EV_SET);
-
-        /* Pairs with memory barrier in kernel futex_wait system call. */
-        smp_mb__after_rmw();
-        if (old == EV_BUSY) {
-            /* There were waiters, wake them up. */
-            qemu_futex_wake(ev, INT_MAX);
-        }
-    }
-}
-
-void qemu_event_reset(QemuEvent *ev)
-{
-    assert(ev->initialized);
-
-    /*
-     * If there was a concurrent reset (or even reset+wait),
-     * do nothing. Otherwise change EV_SET->EV_FREE.
-     */
-    qatomic_or(&ev->value, EV_FREE);
-
-    /*
-     * Order reset before checking the condition in the caller.
-     * Pairs with the first memory barrier in qemu_event_set().
-     */
-    smp_mb__after_rmw();
-}
-
-void qemu_event_wait(QemuEvent *ev)
-{
-    unsigned value;
-
-    assert(ev->initialized);
-
-    /*
-     * qemu_event_wait must synchronize with qemu_event_set even if it does
-     * not go down the slow path, so this load-acquire is needed that
-     * synchronizes with the first memory barrier in qemu_event_set().
-     *
-     * If we do go down the slow path, there is no requirement at all: we
-     * might miss a qemu_event_set() here but ultimately the memory barrier in
-     * qemu_futex_wait() will ensure the check is done correctly.
-     */
-    value = qatomic_load_acquire(&ev->value);
-    if (value != EV_SET) {
-        if (value == EV_FREE) {
-            /*
-             * Leave the event reset and tell qemu_event_set that there are
-             * waiters. No need to retry, because there cannot be a concurrent
-             * busy->free transition. After the CAS, the event will be either
-             * set or busy.
-             *
-             * This cmpxchg doesn't have particular ordering requirements if it
-             * succeeds (moving the store earlier can only cause qemu_event_set()
-             * to issue _more_ wakeups), the failing case needs acquire semantics
-             * like the load above.
-             */
-            if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
-                return;
-            }
-        }
-
-        /*
-         * This is the final check for a concurrent set, so it does need
-         * a smp_mb() pairing with the second barrier of qemu_event_set().
-         * The barrier is inside the FUTEX_WAIT system call.
-         */
-        qemu_futex_wait(ev, EV_BUSY);
-    }
-}
-
 static __thread NotifierList thread_exit;
 
 /*
util/qemu-thread-win32.c
@@ -231,135 +231,6 @@ void qemu_sem_wait(QemuSemaphore *sem)
     }
 }
 
-/* Wrap a Win32 manual-reset event with a fast userspace path. The idea
- * is to reset the Win32 event lazily, as part of a test-reset-test-wait
- * sequence. Such a sequence is, indeed, how QemuEvents are used by
- * RCU and other subsystems!
- *
- * Valid transitions:
- * - free->set, when setting the event
- * - busy->set, when setting the event, followed by SetEvent
- * - set->free, when resetting the event
- * - free->busy, when waiting
- *
- * set->busy does not happen (it can be observed from the outside but
- * it really is set->free->busy).
- *
- * busy->free provably cannot happen; to enforce it, the set->free transition
- * is done with an OR, which becomes a no-op if the event has concurrently
- * transitioned to free or busy (and is faster than cmpxchg).
- */
-
-#define EV_SET         0
-#define EV_FREE        1
-#define EV_BUSY       -1
-
-void qemu_event_init(QemuEvent *ev, bool init)
-{
-    /* Manual reset. */
-    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
-    ev->value = (init ? EV_SET : EV_FREE);
-    ev->initialized = true;
-}
-
-void qemu_event_destroy(QemuEvent *ev)
-{
-    assert(ev->initialized);
-    ev->initialized = false;
-    CloseHandle(ev->event);
-}
-
-void qemu_event_set(QemuEvent *ev)
-{
-    assert(ev->initialized);
-
-    /*
-     * Pairs with both qemu_event_reset() and qemu_event_wait().
-     *
-     * qemu_event_set has release semantics, but because it *loads*
-     * ev->value we need a full memory barrier here.
-     */
-    smp_mb();
-    if (qatomic_read(&ev->value) != EV_SET) {
-        int old = qatomic_xchg(&ev->value, EV_SET);
-
-        /* Pairs with memory barrier after ResetEvent. */
-        smp_mb__after_rmw();
-        if (old == EV_BUSY) {
-            /* There were waiters, wake them up. */
-            SetEvent(ev->event);
-        }
-    }
-}
-
-void qemu_event_reset(QemuEvent *ev)
-{
-    assert(ev->initialized);
-
-    /*
-     * If there was a concurrent reset (or even reset+wait),
-     * do nothing. Otherwise change EV_SET->EV_FREE.
-     */
-    qatomic_or(&ev->value, EV_FREE);
-
-    /*
-     * Order reset before checking the condition in the caller.
-     * Pairs with the first memory barrier in qemu_event_set().
-     */
-    smp_mb__after_rmw();
-}
-
-void qemu_event_wait(QemuEvent *ev)
-{
-    unsigned value;
-
-    assert(ev->initialized);
-
-    /*
-     * qemu_event_wait must synchronize with qemu_event_set even if it does
-     * not go down the slow path, so this load-acquire is needed that
-     * synchronizes with the first memory barrier in qemu_event_set().
-     *
-     * If we do go down the slow path, there is no requirement at all: we
-     * might miss a qemu_event_set() here but ultimately the memory barrier in
-     * qemu_futex_wait() will ensure the check is done correctly.
-     */
-    value = qatomic_load_acquire(&ev->value);
-    if (value != EV_SET) {
-        if (value == EV_FREE) {
-            /*
-             * Here the underlying kernel event is reset, but qemu_event_set is
-             * not yet going to call SetEvent. However, there will be another
-             * check for EV_SET below when setting EV_BUSY. At that point it
-             * is safe to call WaitForSingleObject.
-             */
-            ResetEvent(ev->event);
-
-            /*
-             * It is not clear whether ResetEvent provides this barrier; kernel
-             * APIs (KeResetEvent/KeClearEvent) do not. Better safe than sorry!
-             */
-            smp_mb();
-
-            /*
-             * Leave the event reset and tell qemu_event_set that there are
-             * waiters. No need to retry, because there cannot be a concurrent
-             * busy->free transition. After the CAS, the event will be either
-             * set or busy.
-             */
-            if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
-                return;
-            }
-        }
-
-        /*
-         * ev->value is now EV_BUSY. Since we didn't observe EV_SET,
-         * qemu_event_set() must observe EV_BUSY and call SetEvent().
-         */
-        WaitForSingleObject(ev->event, INFINITE);
-    }
-}
-
 struct QemuThreadData {
     /* Passed to win32_start_routine. */
     void *(*start_routine)(void *);
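For contrast with the userspace EV_SET/EV_FREE/EV_BUSY fast path that the removed code (and now util/event.c) layers on top of the kernel object, here is the bare manual-reset event pattern using only the Win32 calls seen above; every operation is a kernel call, which is the overhead the fast path avoids on uncontended events. A sketch, not part of this patch; the plain_event names are made up:

/* Hedged sketch: a bare manual-reset Win32 event, without the userspace
 * EV_* fast path.  Every call below goes to the kernel.
 */
#include <windows.h>
#include <assert.h>

static HANDLE plain_event;   /* hypothetical variable, for illustration only */

static void plain_event_init(void)
{
    /* Manual reset, initially non-signaled. */
    plain_event = CreateEvent(NULL, TRUE, FALSE, NULL);
    assert(plain_event != NULL);
}

static void plain_event_set(void)
{
    SetEvent(plain_event);       /* releases all current and future waiters */
}

static void plain_event_reset(void)
{
    ResetEvent(plain_event);     /* back to the non-signaled state */
}

static void plain_event_wait(void)
{
    WaitForSingleObject(plain_event, INFINITE);
}

static void plain_event_destroy(void)
{
    CloseHandle(plain_event);
}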