Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging

Block layer patches

- Managing inactive nodes (enables QSD migration with shared storage)
- Fix swapped values for BLOCK_IO_ERROR 'device' and 'qom-path'
- vpc: Read images exported from Azure correctly
- scripts/qemu-gdb: Support coroutine dumps in coredumps
- Minor cleanups

# gpg: Signature made Thu 06 Feb 2025 11:12:50 EST
# gpg:                using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg:                issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6

* tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (25 commits)
  block: remove unused BLOCK_OP_TYPE_DATAPLANE
  iotests: Add (NBD-based) tests for inactive nodes
  iotests: Add qsd-migrate case
  iotests: Add filter_qtest()
  nbd/server: Support inactive nodes
  block/export: Add option to allow export of inactive nodes
  block: Drain nodes before inactivating them
  block/export: Don't ignore image activation error in blk_exp_add()
  block: Support inactive nodes in blk_insert_bs()
  block: Add blockdev-set-active QMP command
  block: Add option to create inactive nodes
  block: Fix crash on block_resize on inactive node
  block: Don't attach inactive child to active node
  migration/block-active: Remove global active flag
  block: Inactivate external snapshot overlays when necessary
  block: Allow inactivating already inactive nodes
  block: Add 'active' field to BlockDeviceInfo
  block-backend: Fix argument order when calling 'qapi_event_send_block_io_error()'
  scripts/qemu-gdb: Support coroutine dumps in coredumps
  scripts/qemu-gdb: Simplify fs_base fetching for coroutines
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
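As a minimal usage sketch of the shared-storage handover these patches enable (not taken from the commit itself): on the destination, a node can be added inactive, exported over NBD with the new allow-inactive option, and activated at handover time with the new blockdev-set-active command. The node name "disk0", export id "exp0", and the image path below are hypothetical.

    -> { "execute": "blockdev-add",
         "arguments": { "driver": "qcow2", "node-name": "disk0", "active": false,
                        "file": { "driver": "file",
                                  "filename": "/shared/disk.qcow2" } } }
    <- { "return": {} }

    -> { "execute": "block-export-add",
         "arguments": { "id": "exp0", "type": "nbd", "node-name": "disk0",
                        "writable": true, "allow-inactive": true } }
    <- { "return": {} }

    (at migration handover, after the source has inactivated its nodes)

    -> { "execute": "blockdev-set-active",
         "arguments": { "node-name": "disk0", "active": true } }
    <- { "return": {} }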
commit f2ec48fefd
35 changed files with 1133 additions and 166 deletions
block.c | 64

@@ -1573,6 +1573,10 @@ static void update_flags_from_options(int *flags, QemuOpts *opts)
     if (qemu_opt_get_bool_del(opts, BDRV_OPT_AUTO_READ_ONLY, false)) {
         *flags |= BDRV_O_AUTO_RDONLY;
     }
+
+    if (!qemu_opt_get_bool_del(opts, BDRV_OPT_ACTIVE, true)) {
+        *flags |= BDRV_O_INACTIVE;
+    }
 }
 
 static void update_options_from_flags(QDict *options, int flags)
@@ -1799,6 +1803,11 @@ QemuOptsList bdrv_runtime_opts = {
            .type = QEMU_OPT_BOOL,
            .help = "Ignore flush requests",
        },
+        {
+            .name = BDRV_OPT_ACTIVE,
+            .type = QEMU_OPT_BOOL,
+            .help = "Node is activated",
+        },
        {
            .name = BDRV_OPT_READ_ONLY,
            .type = QEMU_OPT_BOOL,
@@ -3077,6 +3086,13 @@ bdrv_attach_child_common(BlockDriverState *child_bs,
     assert(child_class->get_parent_desc);
     GLOBAL_STATE_CODE();
 
+    if (bdrv_is_inactive(child_bs) && (perm & ~BLK_PERM_CONSISTENT_READ)) {
+        g_autofree char *perm_names = bdrv_perm_names(perm);
+        error_setg(errp, "Permission '%s' unavailable on inactive node",
+                   perm_names);
+        return NULL;
+    }
+
     new_child = g_new(BdrvChild, 1);
     *new_child = (BdrvChild) {
         .bs = NULL,
@@ -3183,6 +3199,11 @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs,
                    child_bs->node_name, child_name, parent_bs->node_name);
         return NULL;
     }
+    if (bdrv_is_inactive(child_bs) && !bdrv_is_inactive(parent_bs)) {
+        error_setg(errp, "Inactive '%s' can't be a %s child of active '%s'",
+                   child_bs->node_name, child_name, parent_bs->node_name);
+        return NULL;
+    }
 
     bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm);
     bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL,
@@ -6824,6 +6845,10 @@ void bdrv_init_with_whitelist(void)
     bdrv_init();
 }
 
+bool bdrv_is_inactive(BlockDriverState *bs) {
+    return bs->open_flags & BDRV_O_INACTIVE;
+}
+
 int bdrv_activate(BlockDriverState *bs, Error **errp)
 {
     BdrvChild *child, *parent;
@@ -6955,7 +6980,8 @@ bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
     return false;
 }
 
-static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
+static int GRAPH_RDLOCK
+bdrv_inactivate_recurse(BlockDriverState *bs, bool top_level)
 {
     BdrvChild *child, *parent;
     int ret;
@@ -6973,7 +6999,14 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
         return 0;
     }
 
-    assert(!(bs->open_flags & BDRV_O_INACTIVE));
+    /*
+     * Inactivating an already inactive node on user request is harmless, but if
+     * a child is already inactive before its parent, that's bad.
+     */
+    if (bs->open_flags & BDRV_O_INACTIVE) {
+        assert(top_level);
+        return 0;
+    }
 
     /* Inactivate this node */
     if (bs->drv->bdrv_inactivate) {
@@ -6999,7 +7032,9 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
            return -EPERM;
        }
 
+    bdrv_drained_begin(bs);
     bs->open_flags |= BDRV_O_INACTIVE;
+    bdrv_drained_end(bs);
 
     /*
      * Update permissions, they may differ for inactive nodes.
@@ -7010,7 +7045,7 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
 
     /* Recursively inactivate children */
     QLIST_FOREACH(child, &bs->children, next) {
-        ret = bdrv_inactivate_recurse(child->bs);
+        ret = bdrv_inactivate_recurse(child->bs, false);
         if (ret < 0) {
             return ret;
         }
@@ -7019,6 +7054,27 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
     return 0;
 }
 
+int bdrv_inactivate(BlockDriverState *bs, Error **errp)
+{
+    int ret;
+
+    GLOBAL_STATE_CODE();
+    GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+    if (bdrv_has_bds_parent(bs, true)) {
+        error_setg(errp, "Node has active parent node");
+        return -EPERM;
+    }
+
+    ret = bdrv_inactivate_recurse(bs, true);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret, "Failed to inactivate node");
+        return ret;
+    }
+
+    return 0;
+}
+
 int bdrv_inactivate_all(void)
 {
     BlockDriverState *bs = NULL;
@@ -7035,7 +7091,7 @@ int bdrv_inactivate_all(void)
         if (bdrv_has_bds_parent(bs, false)) {
            continue;
        }
-        ret = bdrv_inactivate_recurse(bs);
+        ret = bdrv_inactivate_recurse(bs, true);
        if (ret < 0) {
            bdrv_next_cleanup(&it);
            break;
@@ -253,7 +253,7 @@ static bool blk_can_inactivate(BlockBackend *blk)
      * guest.  For block job BBs that satisfy this, we can just allow
      * it.  This is the case for mirror job source, which is required
      * by libvirt non-shared block migration. */
-    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
+    if (!(blk->perm & ~BLK_PERM_CONSISTENT_READ)) {
         return true;
     }
 
@@ -900,14 +900,24 @@ void blk_remove_bs(BlockBackend *blk)
 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
 {
     ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+    uint64_t perm, shared_perm;
 
     GLOBAL_STATE_CODE();
     bdrv_ref(bs);
     bdrv_graph_wrlock();
+
+    if ((bs->open_flags & BDRV_O_INACTIVE) && blk_can_inactivate(blk)) {
+        blk->disable_perm = true;
+        perm = 0;
+        shared_perm = BLK_PERM_ALL;
+    } else {
+        perm = blk->perm;
+        shared_perm = blk->shared_perm;
+    }
+
     blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                        BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
-                                       blk->perm, blk->shared_perm,
-                                       blk, errp);
+                                       perm, shared_perm, blk, errp);
     bdrv_graph_wrunlock();
     if (blk->root == NULL) {
         return -EPERM;
@@ -1019,6 +1029,10 @@ DeviceState *blk_get_attached_dev(BlockBackend *blk)
     return blk->dev;
 }
 
+/*
+ * The caller is responsible for releasing the value returned
+ * with g_free() after use.
+ */
 static char *blk_get_attached_dev_id_or_path(BlockBackend *blk, bool want_id)
 {
     DeviceState *dev = blk->dev;
@@ -1033,15 +1047,15 @@ static char *blk_get_attached_dev_id_or_path(BlockBackend *blk, bool want_id)
     return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
 }
 
-/*
- * Return the qdev ID, or if no ID is assigned the QOM path, of the block
- * device attached to the BlockBackend.
- */
 char *blk_get_attached_dev_id(BlockBackend *blk)
 {
     return blk_get_attached_dev_id_or_path(blk, true);
 }
 
+/*
+ * The caller is responsible for releasing the value returned
+ * with g_free() after use.
+ */
 static char *blk_get_attached_dev_path(BlockBackend *blk)
 {
     return blk_get_attached_dev_id_or_path(blk, false);
@@ -2134,10 +2148,10 @@ static void send_qmp_error_event(BlockBackend *blk,
 {
     IoOperationType optype;
     BlockDriverState *bs = blk_bs(blk);
+    g_autofree char *path = blk_get_attached_dev_path(blk);
 
     optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
-    qapi_event_send_block_io_error(blk_name(blk),
-                                   blk_get_attached_dev_path(blk),
+    qapi_event_send_block_io_error(path, blk_name(blk),
                                    bs ? bdrv_get_node_name(bs) : NULL, optype,
                                    action, blk_iostatus_is_enabled(blk),
                                    error == ENOSPC, strerror(error));
@@ -75,6 +75,7 @@ static const BlockExportDriver *blk_exp_find_driver(BlockExportType type)
 BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
 {
     bool fixed_iothread = export->has_fixed_iothread && export->fixed_iothread;
+    bool allow_inactive = export->has_allow_inactive && export->allow_inactive;
     const BlockExportDriver *drv;
     BlockExport *exp = NULL;
     BlockDriverState *bs;
@@ -138,14 +139,25 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
         }
     }
 
-    /*
-     * Block exports are used for non-shared storage migration. Make sure
-     * that BDRV_O_INACTIVE is cleared and the image is ready for write
-     * access since the export could be available before migration handover.
-     * ctx was acquired in the caller.
-     */
     bdrv_graph_rdlock_main_loop();
-    bdrv_activate(bs, NULL);
+    if (allow_inactive) {
+        if (!drv->supports_inactive) {
+            error_setg(errp, "Export type does not support inactive exports");
+            bdrv_graph_rdunlock_main_loop();
+            goto fail;
+        }
+    } else {
+        /*
+         * Block exports are used for non-shared storage migration. Make sure
+         * that BDRV_O_INACTIVE is cleared and the image is ready for write
+         * access since the export could be available before migration handover.
+         */
+        ret = bdrv_activate(bs, errp);
+        if (ret < 0) {
+            bdrv_graph_rdunlock_main_loop();
+            goto fail;
+        }
+    }
     bdrv_graph_rdunlock_main_loop();
 
     perm = BLK_PERM_CONSISTENT_READ;
@@ -158,6 +170,9 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
     if (!fixed_iothread) {
         blk_set_allow_aio_context_change(blk, true);
     }
+    if (allow_inactive) {
+        blk_set_force_allow_inactivate(blk);
+    }
 
     ret = blk_insert_bs(blk, bs, errp);
     if (ret < 0) {
@@ -630,11 +630,12 @@ static void print_block_info(Monitor *mon, BlockInfo *info,
     }
 
     if (inserted) {
-        monitor_printf(mon, ": %s (%s%s%s)\n",
+        monitor_printf(mon, ": %s (%s%s%s%s)\n",
                        inserted->file,
                        inserted->drv,
                        inserted->ro ? ", read-only" : "",
-                       inserted->encrypted ? ", encrypted" : "");
+                       inserted->encrypted ? ", encrypted" : "",
+                       inserted->active ? "" : ", inactive");
     } else {
         monitor_printf(mon, ": [not inserted]\n");
     }
@@ -63,6 +63,7 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
     info->file = g_strdup(bs->filename);
     info->ro = bdrv_is_read_only(bs);
     info->drv = g_strdup(bs->drv->format_name);
+    info->active = !bdrv_is_inactive(bs);
     info->encrypted = bs->encrypted;
 
     info->cache = g_new(BlockdevCacheInfo, 1);
@@ -576,7 +576,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
         return;
     }
     bdrv_op_block_all(top_bs, s->blocker);
-    bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);
 
     bdrv_graph_wrunlock();
 
block/vpc.c | 65

@@ -216,6 +216,39 @@ static void vpc_parse_options(BlockDriverState *bs, QemuOpts *opts,
     }
 }
 
+/*
+ * Microsoft Virtual PC and Microsoft Hyper-V produce and read
+ * VHD image sizes differently.  VPC will rely on CHS geometry,
+ * while Hyper-V and disk2vhd use the size specified in the footer.
+ *
+ * We use a couple of approaches to try and determine the correct method:
+ * look at the Creator App field, and look for images that have CHS
+ * geometry that is the maximum value.
+ *
+ * If the CHS geometry is the maximum CHS geometry, then we assume that
+ * the size is the footer->current_size to avoid truncation.  Otherwise,
+ * we follow the table based on footer->creator_app:
+ *
+ * Known creator apps:
+ *  'vpc '  :  CHS              Virtual PC (uses disk geometry)
+ *  'qemu'  :  CHS              QEMU (uses disk geometry)
+ *  'qem2'  :  current_size     QEMU (uses current_size)
+ *  'win '  :  current_size     Hyper-V
+ *  'd2v '  :  current_size     Disk2vhd
+ *  'tap\0' :  current_size     XenServer
+ *  'CTXS'  :  current_size     XenConverter
+ *  'wa\0\0':  current_size     Azure
+ *
+ * The user can override the table values via drive options, however
+ * even with an override we will still use current_size for images
+ * that have CHS geometry of the maximum size.
+ */
+static bool vpc_ignore_current_size(VHDFooter *footer)
+{
+    return !strncmp(footer->creator_app, "vpc ", 4) ||
+           !strncmp(footer->creator_app, "qemu", 4);
+}
+
 static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
 {
@@ -304,36 +337,8 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
         bs->total_sectors = (int64_t)
             be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl;
 
-        /* Microsoft Virtual PC and Microsoft Hyper-V produce and read
-         * VHD image sizes differently.  VPC will rely on CHS geometry,
-         * while Hyper-V and disk2vhd use the size specified in the footer.
-         *
-         * We use a couple of approaches to try and determine the correct method:
-         * look at the Creator App field, and look for images that have CHS
-         * geometry that is the maximum value.
-         *
-         * If the CHS geometry is the maximum CHS geometry, then we assume that
-         * the size is the footer->current_size to avoid truncation.  Otherwise,
-         * we follow the table based on footer->creator_app:
-         *
-         * Known creator apps:
-         *  'vpc '  :  CHS              Virtual PC (uses disk geometry)
-         *  'qemu'  :  CHS              QEMU (uses disk geometry)
-         *  'qem2'  :  current_size     QEMU (uses current_size)
-         *  'win '  :  current_size     Hyper-V
-         *  'd2v '  :  current_size     Disk2vhd
-         *  'tap\0' :  current_size     XenServer
-         *  'CTXS'  :  current_size     XenConverter
-         *
-         *  The user can override the table values via drive options, however
-         *  even with an override we will still use current_size for images
-         *  that have CHS geometry of the maximum size.
-         */
-        use_chs = (!!strncmp(footer->creator_app, "win ", 4) &&
-                   !!strncmp(footer->creator_app, "qem2", 4) &&
-                   !!strncmp(footer->creator_app, "d2v ", 4) &&
-                   !!strncmp(footer->creator_app, "CTXS", 4) &&
-                   !!memcmp(footer->creator_app, "tap", 4)) || s->force_use_chs;
+        /* Use CHS or current_size to determine the image size. */
+        use_chs = vpc_ignore_current_size(footer) || s->force_use_chs;
 
         if (!use_chs || bs->total_sectors == VHD_MAX_GEOMETRY || s->force_use_sz) {
             bs->total_sectors = be64_to_cpu(footer->current_size) /
blockdev.c | 48

@@ -1497,6 +1497,22 @@ static void external_snapshot_action(TransactionAction *action,
         return;
     }
 
+    /*
+     * Older QEMU versions have allowed adding an active parent node to an
+     * inactive child node. This is unsafe in the general case, but there is an
+     * important use case, which is taking a VM snapshot with migration to file
+     * and then adding an external snapshot while the VM is still stopped and
+     * images are inactive. Requiring the user to explicitly create the overlay
+     * as inactive would break compatibility, so just do it automatically here
+     * to keep this working.
+     */
+    if (bdrv_is_inactive(state->old_bs) && !bdrv_is_inactive(state->new_bs)) {
+        ret = bdrv_inactivate(state->new_bs, errp);
+        if (ret < 0) {
+            return;
+        }
+    }
+
     ret = bdrv_append(state->new_bs, state->old_bs, errp);
     if (ret < 0) {
         return;
@@ -3455,6 +3471,38 @@ void qmp_blockdev_del(const char *node_name, Error **errp)
     bdrv_unref(bs);
 }
 
+void qmp_blockdev_set_active(const char *node_name, bool active, Error **errp)
+{
+    int ret;
+
+    GLOBAL_STATE_CODE();
+    GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+    if (!node_name) {
+        if (active) {
+            bdrv_activate_all(errp);
+        } else {
+            ret = bdrv_inactivate_all();
+            if (ret < 0) {
+                error_setg_errno(errp, -ret, "Failed to inactivate all nodes");
+            }
+        }
+    } else {
+        BlockDriverState *bs = bdrv_find_node(node_name);
+        if (!bs) {
+            error_setg(errp, "Failed to find node with node-name='%s'",
+                       node_name);
+            return;
+        }
+
+        if (active) {
+            bdrv_activate(bs, errp);
+        } else {
+            bdrv_inactivate(bs, errp);
+        }
+    }
+}
+
 static BdrvChild * GRAPH_RDLOCK
 bdrv_find_child(BlockDriverState *parent_bs, const char *child_name)
 {
@@ -539,8 +539,6 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
         goto fail;
     }
 
-    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
-
     if (!block_job_set_speed(job, speed, errp)) {
         goto fail;
     }
@@ -1562,15 +1562,6 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
             error_setg(errp, "ioeventfd is required for iothread");
             return false;
         }
-
-        /*
-         * If ioeventfd is (re-)enabled while the guest is running there could
-         * be block jobs that can conflict.
-         */
-        if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
-            error_prepend(errp, "cannot start virtio-blk ioeventfd: ");
-            return false;
-        }
     }
 
     s->vq_aio_context = g_new(AioContext *, conf->num_queues);
@@ -1065,9 +1065,6 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
     int ret;
 
     if (s->ctx && !s->dataplane_fenced) {
-        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
-            return;
-        }
         ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
         if (ret < 0) {
             return;
@@ -257,6 +257,7 @@ typedef enum {
 #define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
 #define BDRV_OPT_DISCARD        "discard"
 #define BDRV_OPT_FORCE_SHARE    "force-share"
+#define BDRV_OPT_ACTIVE         "active"
 
 
 #define BDRV_SECTOR_BITS   9
@@ -355,7 +356,6 @@ typedef enum BlockOpType {
     BLOCK_OP_TYPE_CHANGE,
     BLOCK_OP_TYPE_COMMIT_SOURCE,
     BLOCK_OP_TYPE_COMMIT_TARGET,
-    BLOCK_OP_TYPE_DATAPLANE,
     BLOCK_OP_TYPE_DRIVE_DEL,
     BLOCK_OP_TYPE_EJECT,
     BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
@@ -175,12 +175,18 @@ BlockDriverState * GRAPH_RDLOCK
 check_to_replace_node(BlockDriverState *parent_bs, const char *node_name,
                       Error **errp);
 
+
+bool GRAPH_RDLOCK bdrv_is_inactive(BlockDriverState *bs);
+
 int no_coroutine_fn GRAPH_RDLOCK
 bdrv_activate(BlockDriverState *bs, Error **errp);
 
 int coroutine_fn no_co_wrapper_bdrv_rdlock
 bdrv_co_activate(BlockDriverState *bs, Error **errp);
 
+int no_coroutine_fn
+bdrv_inactivate(BlockDriverState *bs, Error **errp);
+
 void bdrv_activate_all(Error **errp);
 int bdrv_inactivate_all(void);
 
@@ -29,6 +29,9 @@ typedef struct BlockExportDriver {
      */
     size_t instance_size;
 
+    /* True if the export type supports running on an inactive node */
+    bool supports_inactive;
+
     /* Creates and starts a new block export */
     int (*create)(BlockExport *, BlockExportOptions *, Error **);
 
@@ -32,6 +32,13 @@ void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
 void blk_set_disable_request_queuing(BlockBackend *blk, bool disable);
 bool blk_iostatus_is_enabled(const BlockBackend *blk);
 
+/*
+ * Return the qdev ID, or if no ID is assigned the QOM path,
+ * of the block device attached to the BlockBackend.
+ *
+ * The caller is responsible for releasing the value returned
+ * with g_free() after use.
+ */
 char *blk_get_attached_dev_id(BlockBackend *blk);
 
 BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
@@ -12,51 +12,12 @@
 #include "qemu/error-report.h"
 #include "trace.h"
 
-/*
- * Migration-only cache to remember the block layer activation status.
- * Protected by BQL.
- *
- * We need this because..
- *
- * - Migration can fail after block devices are invalidated (during
- *   switchover phase). When that happens, we need to be able to recover
- *   the block drive status by re-activating them.
- *
- * - Currently bdrv_inactivate_all() is not safe to be invoked on top of
- *   invalidated drives (even if bdrv_activate_all() is actually safe to be
- *   called any time!). It means remembering this could help migration to
- *   make sure it won't invalidate twice in a row, crashing QEMU. It can
- *   happen when we migrate a PAUSED VM from host1 to host2, then migrate
- *   again to host3 without starting it. TODO: a cleaner solution is to
- *   allow safe invoke of bdrv_inactivate_all() at anytime, like
- *   bdrv_activate_all().
- *
- * For freshly started QEMU, the flag is initialized to TRUE reflecting the
- * scenario where QEMU owns block device ownerships.
- *
- * For incoming QEMU taking a migration stream, the flag is initialized to
- * FALSE reflecting that the incoming side doesn't own the block devices,
- * not until switchover happens.
- */
-static bool migration_block_active;
-
-/* Setup the disk activation status */
-void migration_block_active_setup(bool active)
-{
-    migration_block_active = active;
-}
-
 bool migration_block_activate(Error **errp)
 {
     ERRP_GUARD();
 
     assert(bql_locked());
 
-    if (migration_block_active) {
-        trace_migration_block_activation("active-skipped");
-        return true;
-    }
-
     trace_migration_block_activation("active");
 
     bdrv_activate_all(errp);
@@ -65,7 +26,6 @@ bool migration_block_activate(Error **errp)
         return false;
     }
 
-    migration_block_active = true;
     return true;
 }
 
@@ -75,11 +35,6 @@ bool migration_block_inactivate(void)
 
     assert(bql_locked());
 
-    if (!migration_block_active) {
-        trace_migration_block_activation("inactive-skipped");
-        return true;
-    }
-
     trace_migration_block_activation("inactive");
 
     ret = bdrv_inactivate_all();
@@ -89,6 +44,5 @@ bool migration_block_inactivate(void)
         return false;
     }
 
-    migration_block_active = false;
     return true;
 }
 
@@ -1895,12 +1895,6 @@ void qmp_migrate_incoming(const char *uri, bool has_channels,
             return;
         }
 
-        /*
-         * Newly setup incoming QEMU. Mark the block active state to reflect
-         * that the src currently owns the disks.
-         */
-        migration_block_active_setup(false);
-
         once = false;
     }
 
@@ -3992,8 +3986,6 @@ static void migration_instance_init(Object *obj)
     ms->state = MIGRATION_STATUS_NONE;
     ms->mbps = -1;
     ms->pages_per_second = -1;
-    /* Freshly started QEMU owns all the block devices */
-    migration_block_active_setup(true);
     qemu_sem_init(&ms->pause_sem, 0);
     qemu_mutex_init(&ms->error_mutex);
 
@@ -554,7 +554,4 @@ void migration_bitmap_sync_precopy(bool last_stage);
 void dirty_bitmap_mig_init(void);
 bool should_send_vmdesc(void);
 
-/* migration/block-active.c */
-void migration_block_active_setup(bool active);
-
 #endif
nbd/server.c | 17

@@ -2026,6 +2026,7 @@ static void nbd_export_delete(BlockExport *blk_exp)
 const BlockExportDriver blk_exp_nbd = {
     .type               = BLOCK_EXPORT_TYPE_NBD,
     .instance_size      = sizeof(NBDExport),
+    .supports_inactive  = true,
     .create             = nbd_export_create,
     .delete             = nbd_export_delete,
     .request_shutdown   = nbd_export_request_shutdown,
@@ -2920,6 +2921,22 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
     NBDExport *exp = client->exp;
     char *msg;
     size_t i;
+    bool inactive;
+
+    WITH_GRAPH_RDLOCK_GUARD() {
+        inactive = bdrv_is_inactive(blk_bs(exp->common.blk));
+        if (inactive) {
+            switch (request->type) {
+            case NBD_CMD_READ:
+                /* These commands are allowed on inactive nodes */
+                break;
+            default:
+                /* Return an error for the rest */
+                return nbd_send_generic_reply(client, request, -EPERM,
+                                              "export is inactive", errp);
+            }
+        }
+    }
 
     switch (request->type) {
     case NBD_CMD_CACHE:
@@ -486,6 +486,10 @@
 # @backing_file_depth: number of files in the backing file chain
 #     (since: 1.2)
 #
+# @active: true if the backend is active; typical cases for inactive backends
+#     are on the migration source instance after migration completes and on the
+#     destination before it completes.  (since: 10.0)
+#
 # @encrypted: true if the backing device is encrypted
 #
 # @detect_zeroes: detect and optimize zero writes (Since 2.1)
@@ -556,7 +560,7 @@
 { 'struct': 'BlockDeviceInfo',
   'data': { 'file': 'str', '*node-name': 'str', 'ro': 'bool', 'drv': 'str',
             '*backing_file': 'str', 'backing_file_depth': 'int',
-            'encrypted': 'bool',
+            'active': 'bool', 'encrypted': 'bool',
             'detect_zeroes': 'BlockdevDetectZeroesOptions',
             'bps': 'int', 'bps_rd': 'int', 'bps_wr': 'int',
             'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int',
@@ -4679,6 +4683,11 @@
 #
 # @cache: cache-related options
 #
+# @active: whether the block node should be activated (default: true).
+#     Having inactive block nodes is useful primarily for migration because it
+#     allows opening an image on the destination while the source is still
+#     holding locks for it.  (Since 10.0)
+#
 # @read-only: whether the block device should be read-only (default:
 #     false).  Note that some block drivers support only read-only
 #     access, either generally or in certain configurations.  In this
@@ -4705,6 +4714,7 @@
             '*node-name': 'str',
             '*discard': 'BlockdevDiscardOptions',
             '*cache': 'BlockdevCacheOptions',
+            '*active': 'bool',
             '*read-only': 'bool',
             '*auto-read-only': 'bool',
             '*force-share': 'bool',
@@ -4935,6 +4945,38 @@
 { 'command': 'blockdev-del', 'data': { 'node-name': 'str' },
   'allow-preconfig': true }
 
+##
+# @blockdev-set-active:
+#
+# Activate or inactivate a block device.  Use this to manage the handover of
+# block devices on migration with qemu-storage-daemon.
+#
+# Activating a node automatically activates all of its child nodes first.
+# Inactivating a node automatically inactivates any of its child nodes that are
+# not in use by a still active node.
+#
+# @node-name: Name of the graph node to activate or inactivate.  By default, all
+#     nodes are affected by the operation.
+#
+# @active: true if the nodes should be active when the command returns success,
+#     false if they should be inactive.
+#
+# Since: 10.0
+#
+# .. qmp-example::
+#
+#     -> { "execute": "blockdev-set-active",
+#          "arguments": {
+#              "node-name": "node0",
+#              "active": false
+#          }
+#        }
+#     <- { "return": {} }
+##
+{ 'command': 'blockdev-set-active',
+  'data': { '*node-name': 'str', 'active': 'bool' },
+  'allow-preconfig': true }
+
 ##
 # @BlockdevCreateOptionsFile:
 #
@@ -372,6 +372,13 @@
 #     cannot be moved to the iothread.  The default is false.
 #     (since: 5.2)
 #
+# @allow-inactive: If true, the export allows the exported node to be inactive.
+#     If it is created for an inactive block node, the node remains inactive.  If
+#     the export type doesn't support running on an inactive node, an error is
+#     returned.  If false, inactive block nodes are automatically activated before
+#     creating the export and trying to inactivate them later fails.
+#     (since: 10.0; default: false)
+#
 # Since: 4.2
 ##
 { 'union': 'BlockExportOptions',
@@ -381,7 +388,8 @@
             '*iothread': 'str',
             'node-name': 'str',
             '*writable': 'bool',
-            '*writethrough': 'bool' },
+            '*writethrough': 'bool',
+            '*allow-inactive': 'bool' },
   'discriminator': 'type',
   'data': {
     'nbd': 'BlockExportOptionsNbd',
@@ -45,3 +45,5 @@ coroutine.CoroutineBt()
 # Default to silently passing through SIGUSR1, because QEMU sends it
 # to itself a lot.
 gdb.execute('handle SIGUSR1 pass noprint nostop')
+# Always print full stack for python errors, easier to debug and report issues
+gdb.execute('set python print-stack full')
@@ -13,28 +13,9 @@ import gdb
 
 VOID_PTR = gdb.lookup_type('void').pointer()
 
-def get_fs_base():
-    '''Fetch %fs base value using arch_prctl(ARCH_GET_FS).  This is
-       pthread_self().'''
-    # %rsp - 120 is scratch space according to the SystemV ABI
-    old = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
-    gdb.execute('call (int)arch_prctl(0x1003, $rsp - 120)', False, True)
-    fs_base = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
-    gdb.execute('set *(uint64_t*)($rsp - 120) = %s' % old, False, True)
-    return fs_base
-
 def pthread_self():
-    '''Fetch pthread_self() from the glibc start_thread function.'''
-    f = gdb.newest_frame()
-    while f.name() != 'start_thread':
-        f = f.older()
-        if f is None:
-            return get_fs_base()
-
-    try:
-        return f.read_var("arg")
-    except ValueError:
-        return get_fs_base()
+    '''Fetch the base address of TLS.'''
+    return gdb.parse_and_eval("$fs_base")
 
 def get_glibc_pointer_guard():
     '''Fetch glibc pointer guard value'''
@@ -65,9 +46,60 @@ def get_jmpbuf_regs(jmpbuf):
             'r15': jmpbuf[JB_R15],
             'rip': glibc_ptr_demangle(jmpbuf[JB_PC], pointer_guard) }
 
-def bt_jmpbuf(jmpbuf):
-    '''Backtrace a jmpbuf'''
-    regs = get_jmpbuf_regs(jmpbuf)
+def symbol_lookup(addr):
+    # Example: "__clone3 + 44 in section .text of /lib64/libc.so.6"
+    result = gdb.execute(f"info symbol {hex(addr)}", to_string=True).strip()
+    try:
+        if "+" in result:
+            (func, result) = result.split(" + ")
+            (offset, result) = result.split(" in ")
+        else:
+            offset = "0"
+            (func, result) = result.split(" in ")
+        func_str = f"{func}<+{offset}> ()"
+    except:
+        return f"??? ({result})"
+
+    # Example: Line 321 of "../util/coroutine-ucontext.c" starts at address
+    # 0x55cf3894d993 <qemu_coroutine_switch+99> and ends at 0x55cf3894d9ab
+    # <qemu_coroutine_switch+123>.
+    result = gdb.execute(f"info line *{hex(addr)}", to_string=True).strip()
+    if not result.startswith("Line "):
+        return func_str
+    result = result[5:]
+
+    try:
+        result = result.split(" starts ")[0]
+        (line, path) = result.split(" of ")
+        path = path.replace("\"", "")
+    except:
+        return func_str
+
+    return f"{func_str} at {path}:{line}"
+
+def dump_backtrace(regs):
+    '''
+    Backtrace dump with raw registers, mimic GDB command 'bt'.
+    '''
+    # Here only rbp and rip that matter..
+    rbp = regs['rbp']
+    rip = regs['rip']
+    i = 0
+
+    while rbp:
+        # For all return addresses on stack, we want to look up symbol/line
+        # on the CALL command, because the return address is the next
+        # instruction instead of the CALL.  Here -1 would work for any
+        # sized CALL instruction.
+        print(f"#{i}  {hex(rip)} in {symbol_lookup(rip if i == 0 else rip-1)}")
+        rip = gdb.parse_and_eval(f"*(uint64_t *)(uint64_t)({hex(rbp)} + 8)")
+        rbp = gdb.parse_and_eval(f"*(uint64_t *)(uint64_t)({hex(rbp)})")
+        i += 1
+
+def dump_backtrace_live(regs):
+    '''
+    Backtrace dump with gdb's 'bt' command, only usable in a live session.
+    '''
     old = dict()
 
     # remember current stack frame and select the topmost
@@ -88,6 +120,17 @@ def bt_jmpbuf(jmpbuf):
 
     selected_frame.select()
 
+def bt_jmpbuf(jmpbuf):
+    '''Backtrace a jmpbuf'''
+    regs = get_jmpbuf_regs(jmpbuf)
+    try:
+        # This reuses gdb's "bt" command, which can be slightly prettier
+        # but only works with live sessions.
+        dump_backtrace_live(regs)
+    except:
+        # If above doesn't work, fallback to poor man's unwind
+        dump_backtrace(regs)
+
 def co_cast(co):
     return co.cast(gdb.lookup_type('CoroutineUContext').pointer())
 
@@ -120,10 +163,15 @@ class CoroutineBt(gdb.Command):
 
         gdb.execute("bt")
 
-        if gdb.parse_and_eval("qemu_in_coroutine()") == False:
-            return
+        try:
+            # This only works with a live session
+            co_ptr = gdb.parse_and_eval("qemu_coroutine_self()")
+        except:
+            # Fallback to use hard-coded ucontext vars if it's coredump
+            co_ptr = gdb.parse_and_eval("co_tls_current")
 
-        co_ptr = gdb.parse_and_eval("qemu_coroutine_self()")
+        if co_ptr == False:
+            return
 
         while True:
             co = co_cast(co_ptr)
@@ -1100,10 +1100,8 @@ class TestRepairQuorum(iotests.QMPTestCase):
 
         # Check the full error message now
         self.vm.shutdown()
-        log = self.vm.get_log()
-        log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log)
+        log = iotests.filter_qtest(self.vm.get_log())
         log = re.sub(r'^Formatting.*\n', '', log)
-        log = re.sub(r'\n\[I \+\d+\.\d+\] CLOSED\n?$', '', log)
         log = re.sub(r'^%s: ' % os.path.basename(iotests.qemu_prog), '', log)
 
         self.assertEqual(log,
@@ -82,9 +82,7 @@ class TestPersistentDirtyBitmap(iotests.QMPTestCase):
         self.vm.shutdown()
 
         #catch 'Persistent bitmaps are lost' possible error
-        log = self.vm.get_log()
-        log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log)
-        log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log)
+        log = iotests.filter_qtest(self.vm.get_log())
         if log:
             print(log)
 
@ -26,6 +26,7 @@ Testing:
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
"virtual-size": 1073741824,
|
"virtual-size": 1073741824,
|
||||||
|
@ -59,6 +60,7 @@ Testing:
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 1073741824,
|
"virtual-size": 1073741824,
|
||||||
"filename": "null-co://",
|
"filename": "null-co://",
|
||||||
|
|
|
@ -114,6 +114,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
"virtual-size": 67108864,
|
"virtual-size": 67108864,
|
||||||
|
@ -155,6 +156,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 197120,
|
"virtual-size": 197120,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.ovl2",
|
"filename": "TEST_DIR/t.IMGFMT.ovl2",
|
||||||
|
@ -183,6 +185,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
"virtual-size": 67108864,
|
"virtual-size": 67108864,
|
||||||
|
@ -224,6 +227,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 197120,
|
"virtual-size": 197120,
|
||||||
"filename": "TEST_DIR/t.IMGFMT",
|
"filename": "TEST_DIR/t.IMGFMT",
|
||||||
|
@ -252,6 +256,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
"virtual-size": 67108864,
|
"virtual-size": 67108864,
|
||||||
|
@ -293,6 +298,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 393216,
|
"virtual-size": 393216,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.mid",
|
"filename": "TEST_DIR/t.IMGFMT.mid",
|
||||||
|
@ -321,6 +327,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 67108864,
|
"virtual-size": 67108864,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.base",
|
"filename": "TEST_DIR/t.IMGFMT.base",
|
||||||
|
@ -350,6 +357,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 393216,
|
"virtual-size": 393216,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.base",
|
"filename": "TEST_DIR/t.IMGFMT.base",
|
||||||
|
@ -521,6 +529,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
"virtual-size": 67108864,
|
"virtual-size": 67108864,
|
||||||
|
@ -562,6 +571,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 197120,
|
"virtual-size": 197120,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.ovl2",
|
"filename": "TEST_DIR/t.IMGFMT.ovl2",
|
||||||
|
@ -590,6 +600,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
|
@ -642,6 +653,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 197120,
|
"virtual-size": 197120,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.ovl3",
|
"filename": "TEST_DIR/t.IMGFMT.ovl3",
|
||||||
|
@ -670,6 +682,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 67108864,
|
"virtual-size": 67108864,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.base",
|
"filename": "TEST_DIR/t.IMGFMT.base",
|
||||||
|
@ -699,6 +712,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 393216,
|
"virtual-size": 393216,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.base",
|
"filename": "TEST_DIR/t.IMGFMT.base",
|
||||||
|
@ -727,6 +741,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
"virtual-size": 67108864,
|
"virtual-size": 67108864,
|
||||||
|
@ -768,6 +783,7 @@ wrote 65536/65536 bytes at offset 1048576
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 197120,
|
"virtual-size": 197120,
|
||||||
"filename": "TEST_DIR/t.IMGFMT",
|
"filename": "TEST_DIR/t.IMGFMT",
|
||||||
|
|
|
@ -23,6 +23,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
|
@ -74,6 +75,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 197120,
|
"virtual-size": 197120,
|
||||||
"filename": "TEST_DIR/t.IMGFMT",
|
"filename": "TEST_DIR/t.IMGFMT",
|
||||||
|
@ -102,6 +104,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"backing-image": {
|
"backing-image": {
|
||||||
"virtual-size": 197120,
|
"virtual-size": 197120,
|
||||||
|
@ -142,6 +145,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 197120,
|
"virtual-size": 197120,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.mid",
|
"filename": "TEST_DIR/t.IMGFMT.mid",
|
||||||
|
@ -170,6 +174,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev
|
||||||
{
|
{
|
||||||
"iops_rd": 0,
|
"iops_rd": 0,
|
||||||
"detect_zeroes": "off",
|
"detect_zeroes": "off",
|
||||||
|
"active": true,
|
||||||
"image": {
|
"image": {
|
||||||
"virtual-size": 197120,
|
"virtual-size": 197120,
|
||||||
"filename": "TEST_DIR/t.IMGFMT.base",
|
"filename": "TEST_DIR/t.IMGFMT.base",
|
||||||
|
|
@@ -701,6 +701,10 @@ def filter_qmp_imgfmt(qmsg):
 def filter_nbd_exports(output: str) -> str:
     return re.sub(r'((min|opt|max) block): [0-9]+', r'\1: XXX', output)
 
+def filter_qtest(output: str) -> str:
+    output = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', output)
+    output = re.sub(r'\n?\[I \+\d+\.\d+\] CLOSED\n?$', '', output)
+    return output
 
 Msg = TypeVar('Msg', Dict[str, Any], List[Any], str)
 
@@ -909,6 +913,10 @@ class VM(qtest.QEMUQtestMachine):
         self._args.append(addr)
         return self
 
+    def add_paused(self):
+        self._args.append('-S')
+        return self
+
     def hmp(self, command_line: str, use_log: bool = False) -> QMPMessage:
         cmd = 'human-monitor-command'
         kwargs: Dict[str, Any] = {'command-line': command_line}
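As a usage note (not part of the patch itself), the two new helpers are intended to be combined roughly as in the sketch below; this is illustrative only and assumes nothing beyond the helpers added above:

# Minimal sketch of a test using the new helpers (illustrative only).
import iotests
from iotests import filter_qemu_io, filter_qtest

iotests.script_initialize(supported_fmts=['generic'])

with iotests.VM() as vm:
    vm.add_paused()      # adds '-S' so the guest is not started automatically
    vm.launch()
    # ... drive the VM with QMP commands here ...
    vm.shutdown()
    log = vm.get_log()
    if log:
        # filter_qtest() strips the qtest OPENED/CLOSED markers from the log
        iotests.log(log, [filter_qtest, filter_qemu_io])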
@@ -95,8 +95,7 @@ class TestCbwError(iotests.QMPTestCase):
 
         self.vm.shutdown()
         log = self.vm.get_log()
-        log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log)
-        log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log)
+        log = iotests.filter_qtest(log)
         log = iotests.filter_qemu_io(log)
         return log
 
tests/qemu-iotests/tests/inactive-node-nbd (new executable file, 303 lines)
@@ -0,0 +1,303 @@
#!/usr/bin/env python3
# group: rw quick
#
# Copyright (C) Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Creator/Owner: Kevin Wolf <kwolf@redhat.com>

import iotests

from iotests import QemuIoInteractive
from iotests import filter_qemu_io, filter_qtest, filter_qmp_testfiles

iotests.script_initialize(supported_fmts=['generic'],
                          supported_protocols=['file'],
                          supported_platforms=['linux'])

def get_export(node_name='disk-fmt', allow_inactive=None):
    exp = {
        'id': 'exp0',
        'type': 'nbd',
        'node-name': node_name,
        'writable': True,
    }

    if allow_inactive is not None:
        exp['allow-inactive'] = allow_inactive

    return exp

def node_is_active(_vm, node_name):
    nodes = _vm.cmd('query-named-block-nodes', flat=True)
    node = next(n for n in nodes if n['node-name'] == node_name)
    return node['active']

with iotests.FilePath('disk.img') as path, \
     iotests.FilePath('snap.qcow2') as snap_path, \
     iotests.FilePath('snap2.qcow2') as snap2_path, \
     iotests.FilePath('target.img') as target_path, \
     iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock, \
     iotests.VM() as vm:

    img_size = '10M'

    iotests.log('Preparing disk...')
    iotests.qemu_img_create('-f', iotests.imgfmt, path, img_size)
    iotests.qemu_img_create('-f', iotests.imgfmt, target_path, img_size)

    iotests.qemu_img_create('-f', 'qcow2', '-b', path, '-F', iotests.imgfmt,
                            snap_path)
    iotests.qemu_img_create('-f', 'qcow2', '-b', snap_path, '-F', 'qcow2',
                            snap2_path)

    iotests.log('Launching VM...')
    vm.add_blockdev(f'file,node-name=disk-file,filename={path}')
    vm.add_blockdev(f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt,'
                    'active=off')
    vm.add_blockdev(f'file,node-name=target-file,filename={target_path}')
    vm.add_blockdev(f'{iotests.imgfmt},file=target-file,node-name=target-fmt')
    vm.add_blockdev(f'file,node-name=snap-file,filename={snap_path}')
    vm.add_blockdev(f'file,node-name=snap2-file,filename={snap2_path}')

    # Actually running the VM activates all images
    vm.add_paused()

    vm.launch()
    vm.qmp_log('nbd-server-start',
               addr={'type': 'unix', 'data':{'path': nbd_sock}},
               filters=[filter_qmp_testfiles])

    iotests.log('\n=== Creating export of inactive node ===')

    iotests.log('\nExports activate nodes without allow-inactive')
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    vm.qmp_log('block-export-add', **get_export())
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    vm.qmp_log('query-block-exports')
    vm.qmp_log('block-export-del', id='exp0')
    vm.event_wait('BLOCK_EXPORT_DELETED')
    vm.qmp_log('query-block-exports')

    iotests.log('\nExports activate nodes with allow-inactive=false')
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    vm.qmp_log('block-export-add', **get_export(allow_inactive=False))
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    vm.qmp_log('query-block-exports')
    vm.qmp_log('block-export-del', id='exp0')
    vm.event_wait('BLOCK_EXPORT_DELETED')
    vm.qmp_log('query-block-exports')

    iotests.log('\nExport leaves nodes inactive with allow-inactive=true')
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    vm.qmp_log('block-export-add', **get_export(allow_inactive=True))
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    vm.qmp_log('query-block-exports')
    vm.qmp_log('block-export-del', id='exp0')
    vm.event_wait('BLOCK_EXPORT_DELETED')
    vm.qmp_log('query-block-exports')

    iotests.log('\n=== Inactivating node with existing export ===')

    iotests.log('\nInactivating nodes with an export fails without '
                'allow-inactive')
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
    vm.qmp_log('block-export-add', **get_export(node_name='disk-fmt'))
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    vm.qmp_log('query-block-exports')
    vm.qmp_log('block-export-del', id='exp0')
    vm.event_wait('BLOCK_EXPORT_DELETED')
    vm.qmp_log('query-block-exports')

    iotests.log('\nInactivating nodes with an export fails with '
                'allow-inactive=false')
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
    vm.qmp_log('block-export-add',
               **get_export(node_name='disk-fmt', allow_inactive=False))
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    vm.qmp_log('query-block-exports')
    vm.qmp_log('block-export-del', id='exp0')
    vm.event_wait('BLOCK_EXPORT_DELETED')
    vm.qmp_log('query-block-exports')

    iotests.log('\nInactivating nodes with an export works with '
                'allow-inactive=true')
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
    vm.qmp_log('block-export-add',
               **get_export(node_name='disk-fmt', allow_inactive=True))
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    vm.qmp_log('query-block-exports')
    vm.qmp_log('block-export-del', id='exp0')
    vm.event_wait('BLOCK_EXPORT_DELETED')
    vm.qmp_log('query-block-exports')

    iotests.log('\n=== Inactive nodes with parent ===')

    iotests.log('\nInactivating nodes with an active parent fails')
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
    vm.qmp_log('blockdev-set-active', node_name='disk-file', active=False)
    iotests.log('disk-file active: %s' % node_is_active(vm, 'disk-file'))
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))

    iotests.log('\nInactivating nodes with an inactive parent works')
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)
    vm.qmp_log('blockdev-set-active', node_name='disk-file', active=False)
    iotests.log('disk-file active: %s' % node_is_active(vm, 'disk-file'))
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))

    iotests.log('\nCreating active parent node with an inactive child fails')
    vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt',
               node_name='disk-filter')
    vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt',
               node_name='disk-filter', active=True)

    iotests.log('\nCreating inactive parent node with an inactive child works')
    vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt',
               node_name='disk-filter', active=False)
    vm.qmp_log('blockdev-del', node_name='disk-filter')

    iotests.log('\n=== Resizing an inactive node ===')
    vm.qmp_log('block_resize', node_name='disk-fmt', size=16*1024*1024)

    iotests.log('\n=== Taking a snapshot of an inactive node ===')

    iotests.log('\nActive overlay over inactive backing file automatically '
                'makes both inactive for compatibility')
    vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap-fmt',
               file='snap-file', backing=None)
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
    vm.qmp_log('blockdev-snapshot', node='disk-fmt', overlay='snap-fmt')
    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
    vm.qmp_log('blockdev-del', node_name='snap-fmt')

    iotests.log('\nInactive overlay over inactive backing file just works')
    vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap-fmt',
               file='snap-file', backing=None, active=False)
    vm.qmp_log('blockdev-snapshot', node='disk-fmt', overlay='snap-fmt')

    iotests.log('\n=== Block jobs with inactive nodes ===')

    iotests.log('\nStreaming into an inactive node')
    vm.qmp_log('block-stream', device='snap-fmt',
               filters=[iotests.filter_qmp_generated_node_ids])

    iotests.log('\nCommitting an inactive root node (active commit)')
    vm.qmp_log('block-commit', job_id='job0', device='snap-fmt',
               filters=[iotests.filter_qmp_generated_node_ids])

    iotests.log('\nCommitting an inactive intermediate node to inactive base')
    vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap2-fmt',
               file='snap2-file', backing='snap-fmt', active=False)

    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
    iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))

    vm.qmp_log('block-commit', job_id='job0', device='snap2-fmt',
               top_node='snap-fmt',
               filters=[iotests.filter_qmp_generated_node_ids])

    iotests.log('\nCommitting an inactive intermediate node to active base')
    vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
    vm.qmp_log('block-commit', job_id='job0', device='snap2-fmt',
               top_node='snap-fmt',
               filters=[iotests.filter_qmp_generated_node_ids])

    iotests.log('\nMirror from inactive source to active target')
    vm.qmp_log('blockdev-mirror', job_id='job0', device='snap2-fmt',
               target='target-fmt', sync='full',
               filters=[iotests.filter_qmp_generated_node_ids])

    iotests.log('\nMirror from active source to inactive target')

    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
    iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))
    iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt'))

    # Activating snap2-fmt recursively activates the whole backing chain
    vm.qmp_log('blockdev-set-active', node_name='snap2-fmt', active=True)
    vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=False)

    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
    iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))
    iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt'))

    vm.qmp_log('blockdev-mirror', job_id='job0', device='snap2-fmt',
               target='target-fmt', sync='full',
               filters=[iotests.filter_qmp_generated_node_ids])

    iotests.log('\nBackup from active source to inactive target')

    vm.qmp_log('blockdev-backup', job_id='job0', device='snap2-fmt',
               target='target-fmt', sync='full',
               filters=[iotests.filter_qmp_generated_node_ids])

    iotests.log('\nBackup from inactive source to active target')

    # Inactivating snap2-fmt recursively inactivates the whole backing chain
    vm.qmp_log('blockdev-set-active', node_name='snap2-fmt', active=False)
    vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=True)

    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
    iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))
    iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt'))

    vm.qmp_log('blockdev-backup', job_id='job0', device='snap2-fmt',
               target='target-fmt', sync='full',
               filters=[iotests.filter_qmp_generated_node_ids])

    iotests.log('\n=== Accessing export on inactive node ===')

    # Use the target node because it has the right image format and isn't the
    # (read-only) backing file of a qcow2 node
    vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=False)
    vm.qmp_log('block-export-add',
               **get_export(node_name='target-fmt', allow_inactive=True))

    # The read should succeed, everything else should fail gracefully
    qemu_io = QemuIoInteractive('-f', 'raw',
                                f'nbd+unix:///target-fmt?socket={nbd_sock}')
    iotests.log(qemu_io.cmd('read 0 64k'), filters=[filter_qemu_io])
    iotests.log(qemu_io.cmd('write 0 64k'), filters=[filter_qemu_io])
    iotests.log(qemu_io.cmd('write -z 0 64k'), filters=[filter_qemu_io])
    iotests.log(qemu_io.cmd('write -zu 0 64k'), filters=[filter_qemu_io])
    iotests.log(qemu_io.cmd('discard 0 64k'), filters=[filter_qemu_io])
    iotests.log(qemu_io.cmd('flush'), filters=[filter_qemu_io])
    iotests.log(qemu_io.cmd('map'), filters=[filter_qemu_io])
    qemu_io.close()

    iotests.log('\n=== Resuming VM activates all images ===')
    vm.qmp_log('cont')

    iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))
    iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt'))
    iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt'))
    iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt'))

    iotests.log('\nShutting down...')
    vm.shutdown()
    log = vm.get_log()
    if log:
        iotests.log(log, [filter_qtest, filter_qemu_io])
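Condensed from the script above, the core behaviour under test comes down to a few calls; the recap below is illustrative only and reuses the vm, get_export() and node_is_active() helpers defined in the test:

# Illustrative recap, reusing vm, get_export() and node_is_active() from above.
vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)

# Without allow-inactive (or with allow-inactive=False) creating the export
# re-activates the node; with allow-inactive=True it stays inactive and only
# reads succeed through the NBD export.
vm.qmp_log('block-export-add', **get_export(allow_inactive=True))
iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt'))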
tests/qemu-iotests/tests/inactive-node-nbd.out (new file, 239 lines)
@@ -0,0 +1,239 @@
Preparing disk...
Launching VM...
{"execute": "nbd-server-start", "arguments": {"addr": {"data": {"path": "SOCK_DIR/PID-nbd.sock"}, "type": "unix"}}}
{"return": {}}

=== Creating export of inactive node ===

Exports activate nodes without allow-inactive
disk-fmt active: False
{"execute": "block-export-add", "arguments": {"id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
{"return": {}}
disk-fmt active: True
{"execute": "query-block-exports", "arguments": {}}
{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
{"execute": "block-export-del", "arguments": {"id": "exp0"}}
{"return": {}}
{"execute": "query-block-exports", "arguments": {}}
{"return": []}

Exports activate nodes with allow-inactive=false
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
{"return": {}}
disk-fmt active: False
{"execute": "block-export-add", "arguments": {"allow-inactive": false, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
{"return": {}}
disk-fmt active: True
{"execute": "query-block-exports", "arguments": {}}
{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
{"execute": "block-export-del", "arguments": {"id": "exp0"}}
{"return": {}}
{"execute": "query-block-exports", "arguments": {}}
{"return": []}

Export leaves nodes inactive with allow-inactive=true
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
{"return": {}}
disk-fmt active: False
{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
{"return": {}}
disk-fmt active: False
{"execute": "query-block-exports", "arguments": {}}
{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
{"execute": "block-export-del", "arguments": {"id": "exp0"}}
{"return": {}}
{"execute": "query-block-exports", "arguments": {}}
{"return": []}

=== Inactivating node with existing export ===

Inactivating nodes with an export fails without allow-inactive
{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
{"return": {}}
{"execute": "block-export-add", "arguments": {"id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
{"return": {}}
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
{"error": {"class": "GenericError", "desc": "Failed to inactivate node: Operation not permitted"}}
disk-fmt active: True
{"execute": "query-block-exports", "arguments": {}}
{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
{"execute": "block-export-del", "arguments": {"id": "exp0"}}
{"return": {}}
{"execute": "query-block-exports", "arguments": {}}
{"return": []}

Inactivating nodes with an export fails with allow-inactive=false
{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
{"return": {}}
{"execute": "block-export-add", "arguments": {"allow-inactive": false, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
{"return": {}}
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
{"error": {"class": "GenericError", "desc": "Failed to inactivate node: Operation not permitted"}}
disk-fmt active: True
{"execute": "query-block-exports", "arguments": {}}
{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
{"execute": "block-export-del", "arguments": {"id": "exp0"}}
{"return": {}}
{"execute": "query-block-exports", "arguments": {}}
{"return": []}

Inactivating nodes with an export works with allow-inactive=true
{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
{"return": {}}
{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}}
{"return": {}}
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
{"return": {}}
disk-fmt active: False
{"execute": "query-block-exports", "arguments": {}}
{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]}
{"execute": "block-export-del", "arguments": {"id": "exp0"}}
{"return": {}}
{"execute": "query-block-exports", "arguments": {}}
{"return": []}

=== Inactive nodes with parent ===

Inactivating nodes with an active parent fails
{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
{"return": {}}
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-file"}}
{"error": {"class": "GenericError", "desc": "Node has active parent node"}}
disk-file active: True
disk-fmt active: True

Inactivating nodes with an inactive parent works
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}}
{"return": {}}
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-file"}}
{"return": {}}
disk-file active: False
disk-fmt active: False

Creating active parent node with an inactive child fails
{"execute": "blockdev-add", "arguments": {"driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}}
{"error": {"class": "GenericError", "desc": "Inactive 'disk-fmt' can't be a file child of active 'disk-filter'"}}
{"execute": "blockdev-add", "arguments": {"active": true, "driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}}
{"error": {"class": "GenericError", "desc": "Inactive 'disk-fmt' can't be a file child of active 'disk-filter'"}}

Creating inactive parent node with an inactive child works
{"execute": "blockdev-add", "arguments": {"active": false, "driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}}
{"return": {}}
{"execute": "blockdev-del", "arguments": {"node-name": "disk-filter"}}
{"return": {}}

=== Resizing an inactive node ===
{"execute": "block_resize", "arguments": {"node-name": "disk-fmt", "size": 16777216}}
{"error": {"class": "GenericError", "desc": "Permission 'resize' unavailable on inactive node"}}

=== Taking a snapshot of an inactive node ===

Active overlay over inactive backing file automatically makes both inactive for compatibility
{"execute": "blockdev-add", "arguments": {"backing": null, "driver": "qcow2", "file": "snap-file", "node-name": "snap-fmt"}}
{"return": {}}
disk-fmt active: False
snap-fmt active: True
{"execute": "blockdev-snapshot", "arguments": {"node": "disk-fmt", "overlay": "snap-fmt"}}
{"return": {}}
disk-fmt active: False
snap-fmt active: False
{"execute": "blockdev-del", "arguments": {"node-name": "snap-fmt"}}
{"return": {}}

Inactive overlay over inactive backing file just works
{"execute": "blockdev-add", "arguments": {"active": false, "backing": null, "driver": "qcow2", "file": "snap-file", "node-name": "snap-fmt"}}
{"return": {}}
{"execute": "blockdev-snapshot", "arguments": {"node": "disk-fmt", "overlay": "snap-fmt"}}
{"return": {}}

=== Block jobs with inactive nodes ===

Streaming into an inactive node
{"execute": "block-stream", "arguments": {"device": "snap-fmt"}}
{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'snap-fmt' can't be a file child of active 'NODE_NAME'"}}

Committing an inactive root node (active commit)
{"execute": "block-commit", "arguments": {"device": "snap-fmt", "job-id": "job0"}}
{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}}

Committing an inactive intermediate node to inactive base
{"execute": "blockdev-add", "arguments": {"active": false, "backing": "snap-fmt", "driver": "qcow2", "file": "snap2-file", "node-name": "snap2-fmt"}}
{"return": {}}
disk-fmt active: False
snap-fmt active: False
snap2-fmt active: False
{"execute": "block-commit", "arguments": {"device": "snap2-fmt", "job-id": "job0", "top-node": "snap-fmt"}}
{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}}

Committing an inactive intermediate node to active base
{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}}
{"return": {}}
{"execute": "block-commit", "arguments": {"device": "snap2-fmt", "job-id": "job0", "top-node": "snap-fmt"}}
{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}}

Mirror from inactive source to active target
{"execute": "blockdev-mirror", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}}
{"error": {"class": "GenericError", "desc": "Inactive 'snap2-fmt' can't be a backing child of active 'NODE_NAME'"}}

Mirror from active source to inactive target
disk-fmt active: True
snap-fmt active: False
snap2-fmt active: False
target-fmt active: True
{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "snap2-fmt"}}
{"return": {}}
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "target-fmt"}}
{"return": {}}
disk-fmt active: True
snap-fmt active: True
snap2-fmt active: True
target-fmt active: False
{"execute": "blockdev-mirror", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}}
{"error": {"class": "GenericError", "desc": "Permission 'write' unavailable on inactive node"}}

Backup from active source to inactive target
{"execute": "blockdev-backup", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}}
{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'target-fmt' can't be a target child of active 'NODE_NAME'"}}

Backup from inactive source to active target
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "snap2-fmt"}}
{"return": {}}
{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "target-fmt"}}
{"return": {}}
disk-fmt active: False
snap-fmt active: False
snap2-fmt active: False
target-fmt active: True
{"execute": "blockdev-backup", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}}
{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'snap2-fmt' can't be a file child of active 'NODE_NAME'"}}

=== Accessing export on inactive node ===
{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "target-fmt"}}
{"return": {}}
{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "target-fmt", "type": "nbd", "writable": true}}
{"return": {}}
read 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)

write failed: Operation not permitted

write failed: Operation not permitted

write failed: Operation not permitted

discard failed: Operation not permitted


qemu-io: Failed to get allocation status: Operation not permitted


=== Resuming VM activates all images ===
{"execute": "cont", "arguments": {}}
{"return": {}}
disk-fmt active: True
snap-fmt active: True
snap2-fmt active: True
target-fmt active: True

Shutting down...
@@ -122,11 +122,10 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
 
         # catch 'Could not reopen qcow2 layer: Bitmap already exists'
         # possible error
-        log = self.vm_a.get_log()
-        log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log)
-        log = re.sub(r'^(wrote .* bytes at offset .*\n.*KiB.*ops.*sec.*\n){3}',
+        log = iotests.filter_qtest(self.vm_a.get_log())
+        log = re.sub(r'^(wrote .* bytes at offset .*\n'
+                     r'.*KiB.*ops.*sec.*\n?){3}',
                      '', log)
-        log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log)
         self.assertEqual(log, '')
 
         # test that bitmap is still persistent
tests/qemu-iotests/tests/qsd-migrate (new executable file, 140 lines)
@@ -0,0 +1,140 @@
#!/usr/bin/env python3
# group: rw quick
#
# Copyright (C) Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Creator/Owner: Kevin Wolf <kwolf@redhat.com>

import iotests

from iotests import filter_qemu_io, filter_qtest

iotests.script_initialize(supported_fmts=['generic'],
                          supported_protocols=['file'],
                          supported_platforms=['linux'])

with iotests.FilePath('disk.img') as path, \
     iotests.FilePath('nbd-src.sock', base_dir=iotests.sock_dir) as nbd_src, \
     iotests.FilePath('nbd-dst.sock', base_dir=iotests.sock_dir) as nbd_dst, \
     iotests.FilePath('migrate.sock', base_dir=iotests.sock_dir) as mig_sock, \
     iotests.VM(path_suffix="-src") as vm_src, \
     iotests.VM(path_suffix="-dst") as vm_dst:

    img_size = '10M'

    iotests.log('Preparing disk...')
    iotests.qemu_img_create('-f', iotests.imgfmt, path, img_size)

    iotests.log('Launching source QSD...')
    qsd_src = iotests.QemuStorageDaemon(
        '--blockdev', f'file,node-name=disk-file,filename={path}',
        '--blockdev', f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt',
        '--nbd-server', f'addr.type=unix,addr.path={nbd_src}',
        '--export', 'nbd,id=exp0,node-name=disk-fmt,writable=true,'
                    'allow-inactive=true',
        qmp=True,
    )

    iotests.log('Launching source VM...')
    vm_src.add_args('-blockdev', f'nbd,node-name=disk,server.type=unix,'
                                 f'server.path={nbd_src},export=disk-fmt')
    vm_src.add_args('-device', 'virtio-blk,drive=disk,id=virtio0')
    vm_src.launch()

    iotests.log('Launching destination QSD...')
    qsd_dst = iotests.QemuStorageDaemon(
        '--blockdev', f'file,node-name=disk-file,filename={path},active=off',
        '--blockdev', f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt,'
                      f'active=off',
        '--nbd-server', f'addr.type=unix,addr.path={nbd_dst}',
        '--export', 'nbd,id=exp0,node-name=disk-fmt,writable=true,'
                    'allow-inactive=true',
        qmp=True,
        instance_id='b',
    )

    iotests.log('Launching destination VM...')
    vm_dst.add_args('-blockdev', f'nbd,node-name=disk,server.type=unix,'
                                 f'server.path={nbd_dst},export=disk-fmt')
    vm_dst.add_args('-device', 'virtio-blk,drive=disk,id=virtio0')
    vm_dst.add_args('-incoming', f'unix:{mig_sock}')
    vm_dst.launch()

    iotests.log('\nTest I/O on the source')
    vm_src.hmp_qemu_io('virtio0/virtio-backend', 'write -P 0x11 0 4k',
                       use_log=True, qdev=True)
    vm_src.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k',
                       use_log=True, qdev=True)

    iotests.log('\nStarting migration...')

    mig_caps = [
        {'capability': 'events', 'state': True},
        {'capability': 'pause-before-switchover', 'state': True},
    ]
    vm_src.qmp_log('migrate-set-capabilities', capabilities=mig_caps)
    vm_dst.qmp_log('migrate-set-capabilities', capabilities=mig_caps)
    vm_src.qmp_log('migrate', uri=f'unix:{mig_sock}',
                   filters=[iotests.filter_qmp_testfiles])

    vm_src.event_wait('MIGRATION',
                      match={'data': {'status': 'pre-switchover'}})

    iotests.log('\nPre-switchover: Reconfigure QSD instances')

    iotests.log(qsd_src.qmp('blockdev-set-active', {'active': False}))

    # Reading is okay from both sides while the image is inactive. Note that
    # the destination may have stale data until it activates the image, though.
    vm_src.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k',
                       use_log=True, qdev=True)
    vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read 0 4k',
                       use_log=True, qdev=True)

    iotests.log(qsd_dst.qmp('blockdev-set-active', {'active': True}))

    iotests.log('\nCompleting migration...')

    vm_src.qmp_log('migrate-continue', state='pre-switchover')
    vm_dst.event_wait('MIGRATION', match={'data': {'status': 'completed'}})

    iotests.log('\nTest I/O on the destination')

    # Now the destination must see what the source wrote
    vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k',
                       use_log=True, qdev=True)

    # And be able to overwrite it
    vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'write -P 0x22 0 4k',
                       use_log=True, qdev=True)
    vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x22 0 4k',
                       use_log=True, qdev=True)

    iotests.log('\nDone')

    vm_src.shutdown()
    iotests.log('\n--- vm_src log ---')
    log = vm_src.get_log()
    if log:
        iotests.log(log, [filter_qtest, filter_qemu_io])
    qsd_src.stop()

    vm_dst.shutdown()
    iotests.log('\n--- vm_dst log ---')
    log = vm_dst.get_log()
    if log:
        iotests.log(log, [filter_qtest, filter_qemu_io])
    qsd_dst.stop()
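The part that matters for shared-storage hand-over is the ordering inside the pre-switchover phase; a minimal sketch of just that step, reusing the objects defined in the test above (illustrative only):

# 1. The source QSD gives up the image: inactivate all of its block nodes.
iotests.log(qsd_src.qmp('blockdev-set-active', {'active': False}))

# 2. The destination QSD takes over and activates its (so far inactive) nodes.
iotests.log(qsd_dst.qmp('blockdev-set-active', {'active': True}))

# 3. Only then is the migration allowed to complete.
vm_src.qmp_log('migrate-continue', state='pre-switchover')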
tests/qemu-iotests/tests/qsd-migrate.out (new file, 59 lines)
@@ -0,0 +1,59 @@
Preparing disk...
Launching source QSD...
Launching source VM...
Launching destination QSD...
Launching destination VM...

Test I/O on the source
{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"write -P 0x11 0 4k\""}}
{"return": ""}
{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}}
{"return": ""}

Starting migration...
{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [{"capability": "events", "state": true}, {"capability": "pause-before-switchover", "state": true}]}}
{"return": {}}
{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [{"capability": "events", "state": true}, {"capability": "pause-before-switchover", "state": true}]}}
{"return": {}}
{"execute": "migrate", "arguments": {"uri": "unix:SOCK_DIR/PID-migrate.sock"}}
{"return": {}}

Pre-switchover: Reconfigure QSD instances
{"return": {}}
{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}}
{"return": ""}
{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read 0 4k\""}}
{"return": ""}
{"return": {}}

Completing migration...
{"execute": "migrate-continue", "arguments": {"state": "pre-switchover"}}
{"return": {}}

Test I/O on the destination
{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}}
{"return": ""}
{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"write -P 0x22 0 4k\""}}
{"return": ""}
{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x22 0 4k\""}}
{"return": ""}

Done

--- vm_src log ---
wrote 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)

--- vm_dst log ---
read 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)