Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-06 09:13:55 -06:00)
block: drop aio functions that operate on the main AioContext
The main AioContext should be accessed explicitly via qemu_get_aio_context(). Most of the time, using it is not the right thing to do.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent b47ec2c456
commit 87f68d3182

7 changed files with 12 additions and 46 deletions
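
With the main-context wrappers gone, code that used to block on the main loop via qemu_aio_wait() now has to name the context it is waiting on. A minimal sketch of the conversion, assuming qemu_get_aio_context() is declared in "qemu/main-loop.h" and that the old wrapper was a thin shim over aio_poll(); the completion flag and helper name below are made up for illustration:

    #include "block/aio.h"
    #include "qemu/main-loop.h"   /* assumed home of qemu_get_aio_context() */

    /* Hypothetical helper: spin the main AioContext until a completion
     * flag set by some AIO callback becomes true. */
    static void wait_for_completion(volatile bool *done)
    {
        /* Previously: while (!*done) { qemu_aio_wait(); } */
        while (!*done) {
            aio_poll(qemu_get_aio_context(), true);   /* blocking poll */
        }
    }
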
@@ -220,7 +220,7 @@ bool aio_poll(AioContext *ctx, bool blocking);
 #ifdef CONFIG_POSIX
 /* Register a file descriptor and associated callbacks. Behaves very similarly
  * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will
- * be invoked when using qemu_aio_wait().
+ * be invoked when using aio_poll().
  *
  * Code that invokes AIO completion functions should rely on this function
  * instead of qemu_set_fd_handler[2].

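The comment now refers to aio_poll() because the handler is always registered on an explicit context. A rough sketch of registering a read handler on the main context, assuming aio_set_fd_handler() takes the same arguments as the removed qemu_aio_set_fd_handler() with the AioContext prepended (the actual prototype in the tree may carry more parameters):

    /* Hypothetical read callback for a POSIX file descriptor. */
    static void my_fd_read(void *opaque)
    {
        /* drain the fd stored behind opaque */
    }

    static void register_my_fd(int fd, void *opaque)   /* hypothetical helper */
    {
        aio_set_fd_handler(qemu_get_aio_context(), fd,
                           my_fd_read,   /* io_read */
                           NULL,         /* io_write: not interested */
                           opaque);
    }
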
@@ -234,7 +234,7 @@ void aio_set_fd_handler(AioContext *ctx,
 
 /* Register an event notifier and associated callbacks. Behaves very similarly
  * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
- * will be invoked when using qemu_aio_wait().
+ * will be invoked when using aio_poll().
  *
  * Code that invokes AIO completion functions should rely on this function
  * instead of event_notifier_set_handler.

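The same applies to event notifiers: the caller passes the AioContext explicitly instead of going through qemu_aio_set_event_notifier(). A hedged sketch, assuming the counterpart registration call is aio_set_event_notifier(ctx, notifier, io_read); the handler and helper names are invented:

    /* Hypothetical handler: clear the notifier and react to the event. */
    static void my_notifier_read(EventNotifier *e)
    {
        event_notifier_test_and_clear(e);
        /* ... handle the event ... */
    }

    static void attach_notifier(AioContext *ctx, EventNotifier *e)   /* hypothetical */
    {
        aio_set_event_notifier(ctx, e, my_notifier_read);
    }
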
@@ -251,19 +251,6 @@ GSource *aio_get_g_source(AioContext *ctx);
 /* Return the ThreadPool bound to this AioContext */
 struct ThreadPool *aio_get_thread_pool(AioContext *ctx);
 
-/* Functions to operate on the main QEMU AioContext. */
-
-bool qemu_aio_wait(void);
-void qemu_aio_set_event_notifier(EventNotifier *notifier,
-                                 EventNotifierHandler *io_read);
-
-#ifdef CONFIG_POSIX
-void qemu_aio_set_fd_handler(int fd,
-                             IOHandler *io_read,
-                             IOHandler *io_write,
-                             void *opaque);
-#endif
-
 /**
  * aio_timer_new:
  * @ctx: the aio context

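The context lines at the end of this hunk are the start of the aio_timer_new() documentation, which follows the same rule: the timer is created against an explicit AioContext. A sketch of arming such a timer, assuming the conventional aio_timer_new(ctx, type, scale, cb, opaque) shape, which is not shown in this diff and should be treated as an assumption:

    /* Hypothetical callback: flag that the timeout fired. */
    static void timeout_cb(void *opaque)
    {
        *(bool *)opaque = true;
    }

    static void arm_timeout(AioContext *ctx, bool *expired)   /* hypothetical helper */
    {
        QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
                                     timeout_cb, expired);
        timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);   /* ~100 ms */
    }
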
@@ -74,7 +74,7 @@ struct BlockJob {
      * Set to true if the job should cancel itself. The flag must
      * always be tested just before toggling the busy flag from false
      * to true. After a job has been cancelled, it should only yield
-     * if #qemu_aio_wait will ("sooner or later") reenter the coroutine.
+     * if #aio_poll will ("sooner or later") reenter the coroutine.
      */
     bool cancelled;
 

@@ -87,7 +87,7 @@ struct BlockJob {
     /**
      * Set to false by the job while it is in a quiescent state, where
      * no I/O is pending and the job has yielded on any condition
-     * that is not detected by #qemu_aio_wait, such as a timer.
+     * that is not detected by #aio_poll, such as a timer.
      */
     bool busy;
 

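Taken together, the two comments describe the contract a job's coroutine follows when it goes quiescent: clear busy, yield only if aio_poll() will eventually reenter the coroutine, and test cancelled just before busy flips back from false to true. An illustrative sketch of that contract, not code from this commit; only the two struct fields above are taken from the source:

    /* Hypothetical pause point inside a block job's coroutine_fn. */
    static void coroutine_fn job_quiescent_yield(BlockJob *job)
    {
        job->busy = false;            /* quiescent: no I/O pending */
        qemu_coroutine_yield();       /* reentered "sooner or later" via aio_poll() */
        if (job->cancelled) {
            /* caller is expected to wind the job down */
        }
        job->busy = true;             /* cancelled was tested just before this */
    }
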
@@ -212,7 +212,7 @@ void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns);
  * Yield the coroutine for a given duration
  *
  * Behaves similarly to co_sleep_ns(), but the sleeping coroutine will be
- * resumed when using qemu_aio_wait().
+ * resumed when using aio_poll().
  */
 void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
                                   int64_t ns);

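co_aio_sleep_ns() already takes the AioContext as its first argument, so sleeping coroutines follow the same explicit-context rule. A small usage sketch; the surrounding function is hypothetical and the header name is assumed to be "block/coroutine.h" in this tree:

    #include "block/coroutine.h"

    /* Hypothetical throttling helper: sleep 10 ms on the given context,
     * to be resumed by aio_poll() on that same context. */
    static void coroutine_fn throttle_a_bit(AioContext *ctx)
    {
        co_aio_sleep_ns(ctx, QEMU_CLOCK_REALTIME, 10 * 1000 * 1000);
    }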