block: remove AioContext locking
This is the big patch that removes aio_context_acquire()/aio_context_release()
from the block layer and affected block layer users.

There isn't a clean way to split this patch and the reviewers are likely
the same group of people, so I decided to do it in one patch.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Message-ID: <20231205182011.1976568-7-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent 6bc30f1949
commit b49f4755c7
41 changed files with 104 additions and 1169 deletions
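For orientation before the diff: the pattern this series deletes is the bracketing of block layer calls with the per-AioContext lock. A minimal before/after sketch, abridged from the cancel_common() hunks below (QEMU-internal APIs, so this only compiles inside the QEMU tree; blk_cleanup_old/blk_cleanup_new are hypothetical names for illustration):

/* Sketch only, based on the cancel_common() hunks in this diff. */

/* Before this commit: callers took the backend's home AioContext lock. */
static void blk_cleanup_old(BlockBackend *blk, AioContext *ctx)
{
    aio_context_acquire(ctx);   /* per-AioContext lock, removed by this commit */
    destroy_blk(blk);
    aio_context_release(ctx);
}

/* After this commit: the acquire/release pair is dropped entirely. */
static void blk_cleanup_new(BlockBackend *blk)
{
    destroy_blk(blk);           /* called without holding any AioContext lock */
}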
tests/unit/test-blockjob.c
@@ -228,7 +228,6 @@ static void cancel_common(CancelJob *s)
     BlockJob *job = &s->common;
     BlockBackend *blk = s->blk;
     JobStatus sts = job->job.status;
-    AioContext *ctx = job->job.aio_context;
 
     job_cancel_sync(&job->job, true);
     WITH_JOB_LOCK_GUARD() {
@@ -240,9 +239,7 @@ static void cancel_common(CancelJob *s)
         job_unref_locked(&job->job);
     }
 
-    aio_context_acquire(ctx);
     destroy_blk(blk);
-    aio_context_release(ctx);
 }
 
 static void test_cancel_created(void)
@@ -391,132 +388,6 @@ static void test_cancel_concluded(void)
     cancel_common(s);
 }
 
-/* (See test_yielding_driver for the job description) */
-typedef struct YieldingJob {
-    BlockJob common;
-    bool should_complete;
-} YieldingJob;
-
-static void yielding_job_complete(Job *job, Error **errp)
-{
-    YieldingJob *s = container_of(job, YieldingJob, common.job);
-    s->should_complete = true;
-    job_enter(job);
-}
-
-static int coroutine_fn yielding_job_run(Job *job, Error **errp)
-{
-    YieldingJob *s = container_of(job, YieldingJob, common.job);
-
-    job_transition_to_ready(job);
-
-    while (!s->should_complete) {
-        job_yield(job);
-    }
-
-    return 0;
-}
-
-/*
- * This job transitions immediately to the READY state, and then
- * yields until it is to complete.
- */
-static const BlockJobDriver test_yielding_driver = {
-    .job_driver = {
-        .instance_size  = sizeof(YieldingJob),
-        .free           = block_job_free,
-        .user_resume    = block_job_user_resume,
-        .run            = yielding_job_run,
-        .complete       = yielding_job_complete,
-    },
-};
-
-/*
- * Test that job_complete_locked() works even on jobs that are in a paused
- * state (i.e., STANDBY).
- *
- * To do this, run YieldingJob in an IO thread, get it into the READY
- * state, then have a drained section.  Before ending the section,
- * acquire the context so the job will not be entered and will thus
- * remain on STANDBY.
- *
- * job_complete_locked() should still work without error.
- *
- * Note that on the QMP interface, it is impossible to lock an IO
- * thread before a drained section ends.  In practice, the
- * bdrv_drain_all_end() and the aio_context_acquire() will be
- * reversed.  However, that makes for worse reproducibility here:
- * Sometimes, the job would no longer be in STANDBY then but already
- * be started.  We cannot prevent that, because the IO thread runs
- * concurrently.  We can only prevent it by taking the lock before
- * ending the drained section, so we do that.
- *
- * (You can reverse the order of operations and most of the time the
- * test will pass, but sometimes the assert(status == STANDBY) will
- * fail.)
- */
-static void test_complete_in_standby(void)
-{
-    BlockBackend *blk;
-    IOThread *iothread;
-    AioContext *ctx;
-    Job *job;
-    BlockJob *bjob;
-
-    /* Create a test drive, move it to an IO thread */
-    blk = create_blk(NULL);
-    iothread = iothread_new();
-
-    ctx = iothread_get_aio_context(iothread);
-    blk_set_aio_context(blk, ctx, &error_abort);
-
-    /* Create our test job */
-    bjob = mk_job(blk, "job", &test_yielding_driver, true,
-                  JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
-    job = &bjob->job;
-    assert_job_status_is(job, JOB_STATUS_CREATED);
-
-    /* Wait for the job to become READY */
-    job_start(job);
-    /*
-     * Here we are waiting for the status to change, so don't bother
-     * protecting the read every time.
-     */
-    AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY);
-
-    /* Begin the drained section, pausing the job */
-    bdrv_drain_all_begin();
-    assert_job_status_is(job, JOB_STATUS_STANDBY);
-
-    /* Lock the IO thread to prevent the job from being run */
-    aio_context_acquire(ctx);
-    /* This will schedule the job to resume it */
-    bdrv_drain_all_end();
-    aio_context_release(ctx);
-
-    WITH_JOB_LOCK_GUARD() {
-        /* But the job cannot run, so it will remain on standby */
-        assert(job->status == JOB_STATUS_STANDBY);
-
-        /* Even though the job is on standby, this should work */
-        job_complete_locked(job, &error_abort);
-
-        /* The test is done now, clean up. */
-        job_finish_sync_locked(job, NULL, &error_abort);
-        assert(job->status == JOB_STATUS_PENDING);
-
-        job_finalize_locked(job, &error_abort);
-        assert(job->status == JOB_STATUS_CONCLUDED);
-
-        job_dismiss_locked(&job, &error_abort);
-    }
-
-    aio_context_acquire(ctx);
-    destroy_blk(blk);
-    aio_context_release(ctx);
-    iothread_join(iothread);
-}
-
 int main(int argc, char **argv)
 {
     qemu_init_main_loop(&error_abort);
@@ -531,13 +402,5 @@ int main(int argc, char **argv)
     g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
     g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
     g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);
-
-    /*
-     * This test is flaky and sometimes fails in CI and otherwise:
-     * don't run unless user opts in via environment variable.
-     */
-    if (getenv("QEMU_TEST_FLAKY_TESTS")) {
-        g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby);
-    }
     return g_test_run();
 }
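The retained lines above also show what continues to serialize job state once the AioContext lock is gone: the job mutex. A minimal sketch of that surviving pattern, abridged from cancel_common() (QEMU-internal APIs; finish_job is a hypothetical wrapper name for illustration):

/* Sketch only: the job-mutex pattern this diff keeps. */
static void finish_job(Job *job)
{
    job_cancel_sync(job, true);    /* now callable without aio_context_acquire() */

    WITH_JOB_LOCK_GUARD() {        /* holds the job mutex for the enclosed block */
        job_unref_locked(job);     /* _locked variants expect the job mutex held */
    }
}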