block: Convert bs->backing_hd to BdrvChild
This is the final step in converting all of the BlockDriverState pointers
that block drivers use to BdrvChild. After this patch, bs->children contains
the full list of child nodes that are referenced by a given BDS, and these
children are only referenced through BdrvChild, so that updating the pointer
in there is enough for changing edges in the graph.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
parent: b26e90f56a
commit: 760e006384

14 changed files with 115 additions and 109 deletions
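The conversion in the diff below is mechanical: every direct use of bs->backing_hd becomes either bs->backing->bs (where a backing child is known to exist) or the NULL-tolerant backing_bs() helper (where the chain may end). As a rough sketch of the relationship, and only a sketch, the types involved are assumed to look something like this; the real QEMU definitions carry more members, such as the child role and the bs->children list mentioned in the commit message:

    /* Sketch only; field and helper names follow this series, not the
     * exact QEMU definitions. A BDS no longer points at its backing BDS
     * directly: it owns a BdrvChild edge and reaches the BDS via child->bs. */
    typedef struct BlockDriverState BlockDriverState;

    typedef struct BdrvChild {
        BlockDriverState *bs;   /* the child node this edge points to */
        /* role, linkage into bs->children, ... omitted in this sketch */
    } BdrvChild;

    struct BlockDriverState {
        BdrvChild *backing;     /* edge to the backing image; replaces backing_hd */
        BdrvChild *file;        /* edge to the protocol/file layer */
        /* ... */
    };

    /* NULL-safe accessor used where the backing chain may end, e.g.
     * "p = backing_bs(p)" instead of "p = p->backing_hd". */
    static inline BlockDriverState *backing_bs(BlockDriverState *bs)
    {
        return bs->backing ? bs->backing->bs : NULL;
    }

With this shape, re-pointing an edge in the graph only requires updating the bs pointer inside the BdrvChild, which is what the commit message means by "updating the pointer in there is enough for changing edges in the graph".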
block/io.c (24)

@@ -170,24 +170,24 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
         bs->bl.opt_mem_alignment = getpagesize();
     }
 
-    if (bs->backing_hd) {
-        bdrv_refresh_limits(bs->backing_hd, &local_err);
+    if (bs->backing) {
+        bdrv_refresh_limits(bs->backing->bs, &local_err);
         if (local_err) {
             error_propagate(errp, local_err);
             return;
         }
         bs->bl.opt_transfer_length =
             MAX(bs->bl.opt_transfer_length,
-                bs->backing_hd->bl.opt_transfer_length);
+                bs->backing->bs->bl.opt_transfer_length);
         bs->bl.max_transfer_length =
             MIN_NON_ZERO(bs->bl.max_transfer_length,
-                         bs->backing_hd->bl.max_transfer_length);
+                         bs->backing->bs->bl.max_transfer_length);
         bs->bl.opt_mem_alignment =
             MAX(bs->bl.opt_mem_alignment,
-                bs->backing_hd->bl.opt_mem_alignment);
+                bs->backing->bs->bl.opt_mem_alignment);
         bs->bl.min_mem_alignment =
             MAX(bs->bl.min_mem_alignment,
-                bs->backing_hd->bl.min_mem_alignment);
+                bs->backing->bs->bl.min_mem_alignment);
     }
 
     /* Then let the driver override it */
@@ -227,7 +227,7 @@ static bool bdrv_requests_pending(BlockDriverState *bs)
     if (bs->file && bdrv_requests_pending(bs->file->bs)) {
         return true;
     }
-    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
+    if (bs->backing && bdrv_requests_pending(bs->backing->bs)) {
         return true;
     }
     return false;
@@ -1505,8 +1505,8 @@ static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
     } else {
         if (bdrv_unallocated_blocks_are_zero(bs)) {
             ret |= BDRV_BLOCK_ZERO;
-        } else if (bs->backing_hd) {
-            BlockDriverState *bs2 = bs->backing_hd;
+        } else if (bs->backing) {
+            BlockDriverState *bs2 = bs->backing->bs;
             int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
             if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                 ret |= BDRV_BLOCK_ZERO;
@@ -1551,7 +1551,7 @@ static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
     int64_t ret = 0;
 
     assert(bs != base);
-    for (p = bs; p != base; p = p->backing_hd) {
+    for (p = bs; p != base; p = backing_bs(p)) {
         ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum);
         if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
             break;
@@ -1614,7 +1614,7 @@ int64_t bdrv_get_block_status(BlockDriverState *bs,
                               int64_t sector_num,
                               int nb_sectors, int *pnum)
 {
-    return bdrv_get_block_status_above(bs, bs->backing_hd,
+    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                        sector_num, nb_sectors, pnum);
 }
 
@@ -1672,7 +1672,7 @@ int bdrv_is_allocated_above(BlockDriverState *top,
             n = pnum_inter;
         }
 
-        intermediate = intermediate->backing_hd;
+        intermediate = backing_bs(intermediate);
     }
 
     *pnum = n;
block/mirror.c

@@ -371,7 +371,7 @@ static void mirror_exit(BlockJob *job, void *opaque)
     if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
         /* drop the bs loop chain formed by the swap: break the loop then
          * trigger the unref from the top one */
-        BlockDriverState *p = s->base->backing_hd;
+        BlockDriverState *p = backing_bs(s->base);
         bdrv_set_backing_hd(s->base, NULL);
         bdrv_unref(p);
     }
@@ -431,7 +431,7 @@ static void coroutine_fn mirror_run(void *opaque)
          */
         bdrv_get_backing_filename(s->target, backing_filename,
                                   sizeof(backing_filename));
-        if (backing_filename[0] && !s->target->backing_hd) {
+        if (backing_filename[0] && !s->target->backing) {
            ret = bdrv_get_info(s->target, &bdi);
            if (ret < 0) {
                goto immediate_exit;
@@ -766,7 +766,7 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
         return;
     }
     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
-    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
+    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
     mirror_start_job(bs, target, replaces,
                      speed, granularity, buf_size,
                      on_source_error, on_target_error, unmap, cb, opaque, errp,
block/qapi.c

@@ -110,8 +110,8 @@ BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs, Error **errp)
             qapi_free_BlockDeviceInfo(info);
             return NULL;
         }
-        if (bs0->drv && bs0->backing_hd) {
-            bs0 = bs0->backing_hd;
+        if (bs0->drv && bs0->backing) {
+            bs0 = bs0->backing->bs;
             (*p_image_info)->has_backing_image = true;
             p_image_info = &((*p_image_info)->backing_image);
         } else {
@@ -362,9 +362,9 @@ static BlockStats *bdrv_query_stats(const BlockDriverState *bs,
         s->parent = bdrv_query_stats(bs->file->bs, query_backing);
     }
 
-    if (query_backing && bs->backing_hd) {
+    if (query_backing && bs->backing) {
         s->has_backing = true;
-        s->backing = bdrv_query_stats(bs->backing_hd, query_backing);
+        s->backing = bdrv_query_stats(bs->backing->bs, query_backing);
     }
 
     return s;
block/qcow.c

@@ -597,13 +597,13 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
         }
 
         if (!cluster_offset) {
-            if (bs->backing_hd) {
+            if (bs->backing) {
                 /* read from the base image */
                 hd_iov.iov_base = (void *)buf;
                 hd_iov.iov_len = n * 512;
                 qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
                 qemu_co_mutex_unlock(&s->lock);
-                ret = bdrv_co_readv(bs->backing_hd, sector_num,
+                ret = bdrv_co_readv(bs->backing->bs, sector_num,
                                     n, &hd_qiov);
                 qemu_co_mutex_lock(&s->lock);
                 if (ret < 0) {
block/qcow2-cluster.c

@@ -1473,7 +1473,7 @@ static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
          */
         switch (qcow2_get_cluster_type(old_l2_entry)) {
         case QCOW2_CLUSTER_UNALLOCATED:
-            if (full_discard || !bs->backing_hd) {
+            if (full_discard || !bs->backing) {
                 continue;
             }
             break;
@@ -1707,7 +1707,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
         }
 
         if (!preallocated) {
-            if (!bs->backing_hd) {
+            if (!bs->backing) {
                 /* not backed; therefore we can simply deallocate the
                  * cluster */
                 l2_table[j] = 0;
block/qcow2.c

@@ -1369,9 +1369,9 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
         switch (ret) {
         case QCOW2_CLUSTER_UNALLOCATED:
 
-            if (bs->backing_hd) {
+            if (bs->backing) {
                 /* read from the base image */
-                n1 = qcow2_backing_read1(bs->backing_hd, &hd_qiov,
+                n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov,
                                          sector_num, cur_nr_sectors);
                 if (n1 > 0) {
                     QEMUIOVector local_qiov;
@@ -1382,7 +1382,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
 
                     BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                     qemu_co_mutex_unlock(&s->lock);
-                    ret = bdrv_co_readv(bs->backing_hd, sector_num,
+                    ret = bdrv_co_readv(bs->backing->bs, sector_num,
                                         n1, &local_qiov);
                     qemu_co_mutex_lock(&s->lock);
 
block/qed.c (12)

@@ -772,8 +772,8 @@ static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
     /* If there is a backing file, get its length. Treat the absence of a
      * backing file like a zero length backing file.
      */
-    if (s->bs->backing_hd) {
-        int64_t l = bdrv_getlength(s->bs->backing_hd);
+    if (s->bs->backing) {
+        int64_t l = bdrv_getlength(s->bs->backing->bs);
         if (l < 0) {
             cb(opaque, l);
             return;
@@ -802,7 +802,7 @@ static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
     qemu_iovec_concat(*backing_qiov, qiov, 0, size);
 
     BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
-    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
+    bdrv_aio_readv(s->bs->backing->bs, pos / BDRV_SECTOR_SIZE,
                    *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
 }
 
@@ -1081,7 +1081,7 @@ static void qed_aio_write_main(void *opaque, int ret)
     if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
         next_fn = qed_aio_next_io;
     } else {
-        if (s->bs->backing_hd) {
+        if (s->bs->backing) {
             next_fn = qed_aio_write_flush_before_l2_update;
         } else {
             next_fn = qed_aio_write_l2_update_cb;
@@ -1139,7 +1139,7 @@ static void qed_aio_write_prefill(void *opaque, int ret)
 static bool qed_should_set_need_check(BDRVQEDState *s)
 {
     /* The flush before L2 update path ensures consistency */
-    if (s->bs->backing_hd) {
+    if (s->bs->backing) {
         return false;
     }
 
@@ -1443,7 +1443,7 @@ static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
     struct iovec iov;
 
     /* Refuse if there are untouched backing file sectors */
-    if (bs->backing_hd) {
+    if (bs->backing) {
         if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
             return -ENOTSUP;
         }
block/stream.c

@@ -56,7 +56,7 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
                                 const char *base_id)
 {
     BlockDriverState *intermediate;
-    intermediate = top->backing_hd;
+    intermediate = backing_bs(top);
 
     /* Must assign before bdrv_delete() to prevent traversing dangling pointer
      * while we delete backing image instances.
@@ -72,7 +72,7 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
         }
 
         unused = intermediate;
-        intermediate = intermediate->backing_hd;
+        intermediate = backing_bs(intermediate);
         bdrv_set_backing_hd(unused, NULL);
         bdrv_unref(unused);
     }
@@ -121,7 +121,7 @@ static void coroutine_fn stream_run(void *opaque)
     int n = 0;
     void *buf;
 
-    if (!bs->backing_hd) {
+    if (!bs->backing) {
         block_job_completed(&s->common, 0);
         return;
     }
@@ -166,7 +166,7 @@ wait:
         } else if (ret >= 0) {
             /* Copy if allocated in the intermediate images. Limit to the
              * known-unallocated area [sector_num, sector_num+n). */
-            ret = bdrv_is_allocated_above(bs->backing_hd, base,
+            ret = bdrv_is_allocated_above(backing_bs(bs), base,
                                           sector_num, n, &n);
 
             /* Finish early if end of backing file has been reached */
block/vmdk.c (21)

@@ -308,10 +308,11 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
 static int vmdk_is_cid_valid(BlockDriverState *bs)
 {
     BDRVVmdkState *s = bs->opaque;
-    BlockDriverState *p_bs = bs->backing_hd;
     uint32_t cur_pcid;
 
-    if (!s->cid_checked && p_bs) {
+    if (!s->cid_checked && bs->backing) {
+        BlockDriverState *p_bs = bs->backing->bs;
+
         cur_pcid = vmdk_read_cid(p_bs, 0);
         if (s->parent_cid != cur_pcid) {
             /* CID not valid */
@@ -1006,7 +1007,7 @@ static int get_whole_cluster(BlockDriverState *bs,
     cluster_bytes = extent->cluster_sectors << BDRV_SECTOR_BITS;
     whole_grain = qemu_blockalign(bs, cluster_bytes);
 
-    if (!bs->backing_hd) {
+    if (!bs->backing) {
         memset(whole_grain, 0, skip_start_sector << BDRV_SECTOR_BITS);
         memset(whole_grain + (skip_end_sector << BDRV_SECTOR_BITS), 0,
                cluster_bytes - (skip_end_sector << BDRV_SECTOR_BITS));
@@ -1015,15 +1016,15 @@ static int get_whole_cluster(BlockDriverState *bs,
     assert(skip_end_sector <= extent->cluster_sectors);
     /* we will be here if it's first write on non-exist grain(cluster).
      * try to read from parent image, if exist */
-    if (bs->backing_hd && !vmdk_is_cid_valid(bs)) {
+    if (bs->backing && !vmdk_is_cid_valid(bs)) {
         ret = VMDK_ERROR;
         goto exit;
     }
 
     /* Read backing data before skip range */
     if (skip_start_sector > 0) {
-        if (bs->backing_hd) {
-            ret = bdrv_read(bs->backing_hd, sector_num,
+        if (bs->backing) {
+            ret = bdrv_read(bs->backing->bs, sector_num,
                             whole_grain, skip_start_sector);
             if (ret < 0) {
                 ret = VMDK_ERROR;
@@ -1039,8 +1040,8 @@ static int get_whole_cluster(BlockDriverState *bs,
     }
     /* Read backing data after skip range */
     if (skip_end_sector < extent->cluster_sectors) {
-        if (bs->backing_hd) {
-            ret = bdrv_read(bs->backing_hd, sector_num + skip_end_sector,
+        if (bs->backing) {
+            ret = bdrv_read(bs->backing->bs, sector_num + skip_end_sector,
                             whole_grain + (skip_end_sector << BDRV_SECTOR_BITS),
                             extent->cluster_sectors - skip_end_sector);
             if (ret < 0) {
@@ -1433,11 +1434,11 @@ static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
         }
         if (ret != VMDK_OK) {
             /* if not allocated, try to read from parent image, if exist */
-            if (bs->backing_hd && ret != VMDK_ZEROED) {
+            if (bs->backing && ret != VMDK_ZEROED) {
                 if (!vmdk_is_cid_valid(bs)) {
                     return -EINVAL;
                 }
-                ret = bdrv_read(bs->backing_hd, sector_num, buf, n);
+                ret = bdrv_read(bs->backing->bs, sector_num, buf, n);
                 if (ret < 0) {
                     return ret;
                 }
block/vvfat.c

@@ -2972,9 +2972,9 @@ static int enable_write_target(BDRVVVFATState *s, Error **errp)
 #endif
 
     bdrv_set_backing_hd(s->bs, bdrv_new());
-    s->bs->backing_hd->drv = &vvfat_write_target;
-    s->bs->backing_hd->opaque = g_new(void *, 1);
-    *(void**)s->bs->backing_hd->opaque = s;
+    s->bs->backing->bs->drv = &vvfat_write_target;
+    s->bs->backing->bs->opaque = g_new(void *, 1);
+    *(void**)s->bs->backing->bs->opaque = s;
 
     return 0;
 