Merge remote-tracking branch 'stefanha/block' into staging

* stefanha/block:
  commit: Remove unused check
  qemu-iotests: Update test cases for commit active
  commit: Support commit active layer
  block: Add commit_active_start()
  mirror: Move base to MirrorBlockJob
  mirror: Don't close target
  qemu-iotests: drop duplicate virtio-blk initialization failure
  vmdk: Allow vmdk_create to work with protocol
  vmdk: Check VMFS extent line field number
  docs: updated qemu-img man page and qemu-doc to reflect VHDX support.
  block: vhdx - improve error message, and .bdrv_check implementation
  block/iscsi: Fix compilation for libiscsi 1.4.0 (API change)
  qapi-schema: fix QEMU 1.8 references
  dataplane: replace hostmem with memory_region_find
  dataplane: change vring API to use VirtQueueElement
  vring: factor common code for error exits
  vring: create a common function to parse descriptors
  sheepdog: fix dynamic grow for running qcow2 format

Message-id: 1387554416-5837-1-git-send-email-stefanha@redhat.com
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
commit debe40fbc5
Anthony Liguori, 2014-01-10 11:05:05 -08:00
24 changed files with 535 additions and 541 deletions


@@ -198,13 +198,7 @@ void commit_start(BlockDriverState *bs, BlockDriverState *base,
         return;
     }
 
-    /* Once we support top == active layer, remove this check */
-    if (top == bs) {
-        error_setg(errp,
-                   "Top image as the active layer is currently unsupported");
-        return;
-    }
+    assert(top != bs);
 
     if (top == base) {
         error_setg(errp, "Invalid files for merge: top and base are the same");
         return;


@@ -359,7 +359,10 @@ retry:
     default:
         iTask.task = iscsi_read10_task(iscsilun->iscsi, iscsilun->lun, lba,
                                        num_sectors * iscsilun->block_size,
-                                       iscsilun->block_size, 0, 0, 0, 0, 0,
+                                       iscsilun->block_size,
+#if !defined(CONFIG_LIBISCSI_1_4) /* API change from 1.4.0 to 1.5.0 */
+                                       0, 0, 0, 0, 0,
+#endif
                                        iscsi_co_generic_cb, &iTask);
         break;
     }


@@ -31,7 +31,8 @@ typedef struct MirrorBlockJob {
     BlockJob common;
     RateLimit limit;
     BlockDriverState *target;
-    MirrorSyncMode mode;
+    BlockDriverState *base;
+    bool is_none_mode;
     BlockdevOnError on_source_error, on_target_error;
     bool synced;
     bool should_complete;
@@ -335,10 +336,9 @@ static void coroutine_fn mirror_run(void *opaque)
     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
     mirror_free_init(s);
 
-    if (s->mode != MIRROR_SYNC_MODE_NONE) {
+    if (!s->is_none_mode) {
         /* First part, loop on the sectors and initialize the dirty bitmap.  */
-        BlockDriverState *base;
-        base = s->mode == MIRROR_SYNC_MODE_FULL ? NULL : bs->backing_hd;
+        BlockDriverState *base = s->base;
         for (sector_num = 0; sector_num < end; ) {
             int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
             ret = bdrv_is_allocated_above(bs, base,
@@ -481,8 +481,14 @@ immediate_exit:
             bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
         }
         bdrv_swap(s->target, s->common.bs);
+        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
+            /* drop the bs loop chain formed by the swap: break the loop then
+             * trigger the unref from the top one */
+            BlockDriverState *p = s->base->backing_hd;
+            s->base->backing_hd = NULL;
+            bdrv_unref(p);
+        }
     }
-    bdrv_close(s->target);
     bdrv_unref(s->target);
     block_job_completed(&s->common, ret);
 }
@@ -536,12 +542,24 @@ static const BlockJobDriver mirror_job_driver = {
     .complete      = mirror_complete,
 };
 
-void mirror_start(BlockDriverState *bs, BlockDriverState *target,
-                  int64_t speed, int64_t granularity, int64_t buf_size,
-                  MirrorSyncMode mode, BlockdevOnError on_source_error,
-                  BlockdevOnError on_target_error,
-                  BlockDriverCompletionFunc *cb,
-                  void *opaque, Error **errp)
+static const BlockJobDriver commit_active_job_driver = {
+    .instance_size = sizeof(MirrorBlockJob),
+    .job_type      = BLOCK_JOB_TYPE_COMMIT,
+    .set_speed     = mirror_set_speed,
+    .iostatus_reset
+                   = mirror_iostatus_reset,
+    .complete      = mirror_complete,
+};
+
+static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
+                             int64_t speed, int64_t granularity,
+                             int64_t buf_size,
+                             BlockdevOnError on_source_error,
+                             BlockdevOnError on_target_error,
+                             BlockDriverCompletionFunc *cb,
+                             void *opaque, Error **errp,
+                             const BlockJobDriver *driver,
+                             bool is_none_mode, BlockDriverState *base)
 {
     MirrorBlockJob *s;
@@ -566,7 +584,8 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
         return;
     }
 
-    s = block_job_create(&mirror_job_driver, bs, speed, cb, opaque, errp);
+    s = block_job_create(driver, bs, speed, cb, opaque, errp);
     if (!s) {
         return;
     }
@@ -574,7 +593,8 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
     s->on_source_error = on_source_error;
     s->on_target_error = on_target_error;
     s->target = target;
-    s->mode = mode;
+    s->is_none_mode = is_none_mode;
+    s->base = base;
     s->granularity = granularity;
     s->buf_size = MAX(buf_size, granularity);
@@ -586,3 +606,35 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
     trace_mirror_start(bs, s, s->common.co, opaque);
     qemu_coroutine_enter(s->common.co, s);
 }
+
+void mirror_start(BlockDriverState *bs, BlockDriverState *target,
+                  int64_t speed, int64_t granularity, int64_t buf_size,
+                  MirrorSyncMode mode, BlockdevOnError on_source_error,
+                  BlockdevOnError on_target_error,
+                  BlockDriverCompletionFunc *cb,
+                  void *opaque, Error **errp)
+{
+    bool is_none_mode;
+    BlockDriverState *base;
+
+    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
+    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
+    mirror_start_job(bs, target, speed, granularity, buf_size,
+                     on_source_error, on_target_error, cb, opaque, errp,
+                     &mirror_job_driver, is_none_mode, base);
+}
+
+void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
+                         int64_t speed,
+                         BlockdevOnError on_error,
+                         BlockDriverCompletionFunc *cb,
+                         void *opaque, Error **errp)
+{
+    if (bdrv_reopen(base, bs->open_flags, errp)) {
+        return;
+    }
+    bdrv_ref(base);
+    mirror_start_job(bs, base, speed, 0, 0,
+                     on_error, on_error, cb, opaque, errp,
+                     &commit_active_job_driver, false, base);
+}


@@ -2048,13 +2048,14 @@ static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
 {
     SheepdogAIOCB *acb;
     int ret;
+    int64_t offset = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE;
+    BDRVSheepdogState *s = bs->opaque;
 
-    if (bs->growable && sector_num + nb_sectors > bs->total_sectors) {
-        ret = sd_truncate(bs, (sector_num + nb_sectors) * BDRV_SECTOR_SIZE);
+    if (bs->growable && offset > s->inode.vdi_size) {
+        ret = sd_truncate(bs, offset);
         if (ret < 0) {
             return ret;
         }
-        bs->total_sectors = sector_num + nb_sectors;
     }
 
     acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors);
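
The growth path above can be exercised from the qemu-iotests harness. A minimal sketch, assuming a reachable Sheepdog cluster, the tests/qemu-iotests helpers, and a hypothetical VDI name (qemu-io's -g flag requests a growable open):

    from iotests import qemu_io  # tests/qemu-iotests helper

    # Hypothetical: writing past the current vdi_size of a growable Sheepdog
    # image should now grow the inode via sd_truncate() instead of touching
    # bs->total_sectors.
    qemu_io('-g', '-c', 'write -P 0xcd 1G 64k', 'sheepdog:grow-test')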


@@ -706,7 +706,8 @@ exit:
  *
  * If read-only, we must replay the log in RAM (or refuse to open
  * a dirty VHDX file read-only) */
-int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed)
+int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed,
+                   Error **errp)
 {
     int ret = 0;
     VHDXHeader *hdr;
@@ -761,6 +762,16 @@ int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed)
     }
 
     if (logs.valid) {
+        if (bs->read_only) {
+            ret = -EPERM;
+            error_setg_errno(errp, EPERM,
+                             "VHDX image file '%s' opened read-only, but "
+                             "contains a log that needs to be replayed.  To "
+                             "replay the log, execute:\n qemu-img check -r "
+                             "all '%s'",
+                             bs->filename, bs->filename);
+            goto exit;
+        }
         /* now flush the log */
         ret = vhdx_log_flush(bs, s, &logs);
         if (ret < 0) {


@@ -878,7 +878,6 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
     int ret = 0;
     uint32_t i;
     uint64_t signature;
-    bool log_flushed = false;
 
     s->bat = NULL;
 
@@ -907,7 +906,7 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
         goto fail;
     }
 
-    ret = vhdx_parse_log(bs, s, &log_flushed);
+    ret = vhdx_parse_log(bs, s, &s->log_replayed_on_open, errp);
     if (ret < 0) {
         goto fail;
     }
@@ -1854,6 +1853,24 @@ exit:
     return ret;
 }
 
+/* If opened r/w, the VHDX driver will automatically replay the log,
+ * if one is present, inside the vhdx_open() call.
+ *
+ * If qemu-img check -r all is called, the image is automatically opened
+ * r/w and any log has already been replayed, so there is nothing (currently)
+ * for us to do here
+ */
+static int vhdx_check(BlockDriverState *bs, BdrvCheckResult *result,
+                      BdrvCheckMode fix)
+{
+    BDRVVHDXState *s = bs->opaque;
+
+    if (s->log_replayed_on_open) {
+        result->corruptions_fixed++;
+    }
+    return 0;
+}
+
 static QEMUOptionParameter vhdx_create_options[] = {
     {
         .name = BLOCK_OPT_SIZE,
@@ -1898,6 +1915,7 @@ static BlockDriver bdrv_vhdx = {
     .bdrv_co_writev         = vhdx_co_writev,
     .bdrv_create            = vhdx_create,
     .bdrv_get_info          = vhdx_get_info,
+    .bdrv_check             = vhdx_check,
 
     .create_options         = vhdx_create_options,
 };
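
Together with the vhdx_parse_log() change above, the user-visible flow is: a dirty image refuses to open read-only with the message shown, and qemu-img check -r all reopens it read/write, replays the log inside vhdx_open(), and reports the replay through .bdrv_check as a fixed corruption. A minimal sketch, assuming the tests/qemu-iotests helpers and a hypothetical image name:

    from iotests import qemu_img  # tests/qemu-iotests helper

    # Hypothetical VHDX image left with an unreplayed log by a crashed writer:
    # a read-only open now fails with -EPERM, while 'check -r all' opens the
    # image r/w, replays the log, and bumps result->corruptions_fixed.
    qemu_img('check', '-r', 'all', 'dirty.vhdx')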


@@ -394,6 +394,8 @@ typedef struct BDRVVHDXState {
 
     Error *migration_blocker;
 
+    bool log_replayed_on_open;
+
     QLIST_HEAD(VHDXRegionHead, VHDXRegionEntry) regions;
 } BDRVVHDXState;
 
@@ -408,7 +410,8 @@ uint32_t vhdx_checksum_calc(uint32_t crc, uint8_t *buf, size_t size,
 bool vhdx_checksum_is_valid(uint8_t *buf, size_t size, int crc_offset);
 
-int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed);
+int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed,
+                   Error **errp);
 
 int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
                              void *data, uint32_t length, uint64_t offset);


@@ -749,9 +749,14 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
                 return -EINVAL;
             }
         } else if (!strcmp(type, "VMFS")) {
-            flat_offset = 0;
+            if (ret == 4) {
+                flat_offset = 0;
+            } else {
+                error_setg(errp, "Invalid extent lines:\n%s", p);
+                return -EINVAL;
+            }
         } else if (ret != 4) {
-            error_setg(errp, "Invalid extent lines: \n%s", p);
+            error_setg(errp, "Invalid extent lines:\n%s", p);
             return -EINVAL;
         }
@@ -1447,23 +1452,33 @@ static int coroutine_fn vmdk_co_write_zeroes(BlockDriverState *bs,
 }
 
 static int vmdk_create_extent(const char *filename, int64_t filesize,
-                              bool flat, bool compress, bool zeroed_grain)
+                              bool flat, bool compress, bool zeroed_grain,
+                              Error **errp)
 {
     int ret, i;
-    int fd = 0;
+    BlockDriverState *bs = NULL;
     VMDK4Header header;
-    uint32_t tmp, magic, grains, gd_size, gt_size, gt_count;
+    Error *local_err;
+    uint32_t tmp, magic, grains, gd_sectors, gt_size, gt_count;
+    uint32_t *gd_buf = NULL;
+    int gd_buf_size;
 
-    fd = qemu_open(filename,
-                   O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE,
-                   0644);
-    if (fd < 0) {
-        return -errno;
+    ret = bdrv_create_file(filename, NULL, &local_err);
+    if (ret < 0) {
+        error_propagate(errp, local_err);
+        goto exit;
+    }
+
+    ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR, &local_err);
+    if (ret < 0) {
+        error_propagate(errp, local_err);
+        goto exit;
     }
+
     if (flat) {
-        ret = ftruncate(fd, filesize);
+        ret = bdrv_truncate(bs, filesize);
         if (ret < 0) {
-            ret = -errno;
+            error_setg(errp, "Could not truncate file");
         }
         goto exit;
     }
@@ -1474,24 +1489,23 @@ static int vmdk_create_extent(const char *filename, int64_t filesize,
               | (compress ? VMDK4_FLAG_COMPRESS | VMDK4_FLAG_MARKER : 0)
               | (zeroed_grain ? VMDK4_FLAG_ZERO_GRAIN : 0);
     header.compressAlgorithm = compress ? VMDK4_COMPRESSION_DEFLATE : 0;
-    header.capacity = filesize / 512;
+    header.capacity = filesize / BDRV_SECTOR_SIZE;
     header.granularity = 128;
-    header.num_gtes_per_gt = 512;
+    header.num_gtes_per_gt = BDRV_SECTOR_SIZE;
 
-    grains = (filesize / 512 + header.granularity - 1) / header.granularity;
-    gt_size = ((header.num_gtes_per_gt * sizeof(uint32_t)) + 511) >> 9;
-    gt_count =
-        (grains + header.num_gtes_per_gt - 1) / header.num_gtes_per_gt;
-    gd_size = (gt_count * sizeof(uint32_t) + 511) >> 9;
+    grains = DIV_ROUND_UP(filesize / BDRV_SECTOR_SIZE, header.granularity);
+    gt_size = DIV_ROUND_UP(header.num_gtes_per_gt * sizeof(uint32_t),
+                           BDRV_SECTOR_SIZE);
+    gt_count = DIV_ROUND_UP(grains, header.num_gtes_per_gt);
+    gd_sectors = DIV_ROUND_UP(gt_count * sizeof(uint32_t), BDRV_SECTOR_SIZE);
 
     header.desc_offset = 1;
     header.desc_size = 20;
     header.rgd_offset = header.desc_offset + header.desc_size;
-    header.gd_offset = header.rgd_offset + gd_size + (gt_size * gt_count);
+    header.gd_offset = header.rgd_offset + gd_sectors + (gt_size * gt_count);
     header.grain_offset =
-        ((header.gd_offset + gd_size + (gt_size * gt_count) +
-          header.granularity - 1) / header.granularity) *
-        header.granularity;
+        ROUND_UP(header.gd_offset + gd_sectors + (gt_size * gt_count),
+                 header.granularity);
 
     /* swap endianness for all header fields */
     header.version = cpu_to_le32(header.version);
     header.flags = cpu_to_le32(header.flags);
@@ -1511,48 +1525,55 @@ static int vmdk_create_extent(const char *filename, int64_t filesize,
     header.check_bytes[3] = 0xa;
 
     /* write all the data */
-    ret = qemu_write_full(fd, &magic, sizeof(magic));
-    if (ret != sizeof(magic)) {
-        ret = -errno;
+    ret = bdrv_pwrite(bs, 0, &magic, sizeof(magic));
+    if (ret < 0) {
+        error_set(errp, QERR_IO_ERROR);
         goto exit;
     }
-    ret = qemu_write_full(fd, &header, sizeof(header));
-    if (ret != sizeof(header)) {
-        ret = -errno;
+    ret = bdrv_pwrite(bs, sizeof(magic), &header, sizeof(header));
+    if (ret < 0) {
+        error_set(errp, QERR_IO_ERROR);
         goto exit;
     }
 
-    ret = ftruncate(fd, le64_to_cpu(header.grain_offset) << 9);
+    ret = bdrv_truncate(bs, le64_to_cpu(header.grain_offset) << 9);
     if (ret < 0) {
-        ret = -errno;
+        error_setg(errp, "Could not truncate file");
         goto exit;
     }
 
     /* write grain directory */
-    lseek(fd, le64_to_cpu(header.rgd_offset) << 9, SEEK_SET);
-    for (i = 0, tmp = le64_to_cpu(header.rgd_offset) + gd_size;
+    gd_buf_size = gd_sectors * BDRV_SECTOR_SIZE;
+    gd_buf = g_malloc0(gd_buf_size);
+    for (i = 0, tmp = le64_to_cpu(header.rgd_offset) + gd_sectors;
          i < gt_count; i++, tmp += gt_size) {
-        ret = qemu_write_full(fd, &tmp, sizeof(tmp));
-        if (ret != sizeof(tmp)) {
-            ret = -errno;
-            goto exit;
-        }
+        gd_buf[i] = cpu_to_le32(tmp);
+    }
+    ret = bdrv_pwrite(bs, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE,
+                      gd_buf, gd_buf_size);
+    if (ret < 0) {
+        error_set(errp, QERR_IO_ERROR);
+        goto exit;
     }
 
     /* write backup grain directory */
-    lseek(fd, le64_to_cpu(header.gd_offset) << 9, SEEK_SET);
-    for (i = 0, tmp = le64_to_cpu(header.gd_offset) + gd_size;
+    for (i = 0, tmp = le64_to_cpu(header.gd_offset) + gd_sectors;
          i < gt_count; i++, tmp += gt_size) {
-        ret = qemu_write_full(fd, &tmp, sizeof(tmp));
-        if (ret != sizeof(tmp)) {
-            ret = -errno;
-            goto exit;
-        }
+        gd_buf[i] = cpu_to_le32(tmp);
+    }
+    ret = bdrv_pwrite(bs, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE,
+                      gd_buf, gd_buf_size);
+    if (ret < 0) {
+        error_set(errp, QERR_IO_ERROR);
+        goto exit;
     }
 
     ret = 0;
 exit:
-    qemu_close(fd);
+    if (bs) {
+        bdrv_unref(bs);
+    }
+    g_free(gd_buf);
     return ret;
 }
@@ -1599,7 +1620,9 @@ static int filename_decompose(const char *filename, char *path, char *prefix,
 static int vmdk_create(const char *filename, QEMUOptionParameter *options,
                        Error **errp)
 {
-    int fd, idx = 0;
+    int idx = 0;
+    BlockDriverState *new_bs = NULL;
+    Error *local_err;
     char *desc = NULL;
     int64_t total_size = 0, filesize;
     const char *adapter_type = NULL;
@@ -1616,6 +1639,7 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options,
     uint32_t parent_cid = 0xffffffff;
     uint32_t number_heads = 16;
     bool zeroed_grain = false;
+    uint32_t desc_offset = 0, desc_len;
     const char desc_template[] =
         "# Disk DescriptorFile\n"
         "version=1\n"
@@ -1749,7 +1773,7 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options,
                  path, desc_filename);
 
         if (vmdk_create_extent(ext_filename, size,
-                               flat, compress, zeroed_grain)) {
+                               flat, compress, zeroed_grain, errp)) {
             ret = -EINVAL;
             goto exit;
         }
@@ -1757,7 +1781,7 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options,
 
         /* Format description line */
         snprintf(desc_line, sizeof(desc_line),
-                 desc_extent_line, size / 512, desc_filename);
+                 desc_extent_line, size / BDRV_SECTOR_SIZE, desc_filename);
         g_string_append(ext_desc_lines, desc_line);
     }
     /* generate descriptor file */
@@ -1768,36 +1792,43 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options,
                    parent_desc_line,
                    ext_desc_lines->str,
                    (flags & BLOCK_FLAG_COMPAT6 ? 6 : 4),
-                   total_size / (int64_t)(63 * number_heads * 512),
+                   total_size /
+                       (int64_t)(63 * number_heads * BDRV_SECTOR_SIZE),
                    number_heads,
                    adapter_type);
-    if (split || flat) {
-        fd = qemu_open(filename,
-                       O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE,
-                       0644);
+    desc_len = strlen(desc);
+    /* the descriptor offset = 0x200 */
+    if (!split && !flat) {
+        desc_offset = 0x200;
     } else {
-        fd = qemu_open(filename,
-                       O_WRONLY | O_BINARY | O_LARGEFILE,
-                       0644);
+        ret = bdrv_create_file(filename, options, &local_err);
+        if (ret < 0) {
+            error_setg_errno(errp, -ret, "Could not create image file");
+            goto exit;
+        }
     }
-    if (fd < 0) {
-        ret = -errno;
+    ret = bdrv_file_open(&new_bs, filename, NULL, BDRV_O_RDWR, &local_err);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret, "Could not write description");
        goto exit;
     }
-    /* the descriptor offset = 0x200 */
-    if (!split && !flat && 0x200 != lseek(fd, 0x200, SEEK_SET)) {
-        ret = -errno;
-        goto close_exit;
+    ret = bdrv_pwrite(new_bs, desc_offset, desc, desc_len);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret, "Could not write description");
+        goto exit;
     }
-    ret = qemu_write_full(fd, desc, strlen(desc));
-    if (ret != strlen(desc)) {
-        ret = -errno;
-        goto close_exit;
+    /* bdrv_pwrite write padding zeros to align to sector, we don't need
+     * that for description file */
+    if (desc_offset == 0) {
+        ret = bdrv_truncate(new_bs, desc_len);
+        if (ret < 0) {
+            error_setg(errp, "Could not truncate file");
+        }
     }
-    ret = 0;
-close_exit:
-    qemu_close(fd);
 exit:
+    if (new_bs) {
+        bdrv_unref(new_bs);
+    }
     g_free(desc);
     g_string_free(ext_desc_lines, true);
     return ret;


@@ -1820,8 +1820,13 @@ void qmp_block_commit(const char *device,
         return;
     }
 
-    commit_start(bs, base_bs, top_bs, speed, on_error, block_job_cb, bs,
-                 &local_err);
+    if (top_bs == bs) {
+        commit_active_start(bs, base_bs, speed, on_error, block_job_cb,
+                            bs, &local_err);
+    } else {
+        commit_start(bs, base_bs, top_bs, speed, on_error, block_job_cb, bs,
+                     &local_err);
+    }
     if (local_err != NULL) {
         error_propagate(errp, local_err);
         return;

configure

@@ -3078,6 +3078,21 @@ EOF
   fi
 fi
 
+# We also need to know the API version because there was an
+# API change from 1.4.0 to 1.5.0.
+if test "$libiscsi" = "yes"; then
+  cat >$TMPC <<EOF
+#include <iscsi/iscsi.h>
+int main(void)
+{
+    iscsi_read10_task(0, 0, 0, 0, 0, 0, 0);
+    return 0;
+}
+EOF
+  if compile_prog "" "-liscsi"; then
+    libiscsi_version="1.4.0"
+  fi
+fi
+
 ##########################################
 # Do we need libm
@@ -3805,7 +3820,11 @@ echo "nss used          $smartcard_nss"
 echo "libusb            $libusb"
 echo "usb net redir     $usb_redir"
 echo "GLX support       $glx"
+if test "$libiscsi_version" = "1.4.0"; then
+echo "libiscsi support  $libiscsi (1.4.0)"
+else
 echo "libiscsi support  $libiscsi"
+fi
 echo "build guest agent $guest_agent"
 echo "QGA VSS support   $guest_agent_with_vss"
 echo "seccomp support   $seccomp"
@@ -4137,6 +4156,9 @@ fi
 
 if test "$libiscsi" = "yes" ; then
   echo "CONFIG_LIBISCSI=y" >> $config_host_mak
+  if test "$libiscsi_version" = "1.4.0"; then
+    echo "CONFIG_LIBISCSI_1_4=y" >> $config_host_mak
+  fi
 fi
 
 if test "$seccomp" = "yes"; then


@@ -35,7 +35,7 @@ enum {
 typedef struct {
     struct iocb iocb;               /* Linux AIO control block */
     QEMUIOVector *inhdr;            /* iovecs for virtio_blk_inhdr */
-    unsigned int head;              /* vring descriptor index */
+    VirtQueueElement *elem;         /* saved data from the virtqueue */
     struct iovec *bounce_iov;       /* used if guest buffers are unaligned */
     QEMUIOVector *read_qiov;        /* for read completion /w bounce buffer */
 } VirtIOBlockRequest;
@@ -96,7 +96,7 @@ static void complete_request(struct iocb *iocb, ssize_t ret, void *opaque)
         len = 0;
     }
 
-    trace_virtio_blk_data_plane_complete_request(s, req->head, ret);
+    trace_virtio_blk_data_plane_complete_request(s, req->elem->index, ret);
 
     if (req->read_qiov) {
         assert(req->bounce_iov);
@@ -118,12 +118,12 @@ static void complete_request(struct iocb *iocb, ssize_t ret, void *opaque)
      * written to, but for virtio-blk it seems to be the number of bytes
      * transferred plus the status bytes.
      */
-    vring_push(&s->vring, req->head, len + sizeof(hdr));
-
+    vring_push(&s->vring, req->elem, len + sizeof(hdr));
+    req->elem = NULL;
     s->num_reqs--;
 }
 
-static void complete_request_early(VirtIOBlockDataPlane *s, unsigned int head,
+static void complete_request_early(VirtIOBlockDataPlane *s, VirtQueueElement *elem,
                                    QEMUIOVector *inhdr, unsigned char status)
 {
     struct virtio_blk_inhdr hdr = {
@@ -134,26 +134,26 @@ static void complete_request_early(VirtIOBlockDataPlane *s, unsigned int head,
     qemu_iovec_destroy(inhdr);
     g_slice_free(QEMUIOVector, inhdr);
 
-    vring_push(&s->vring, head, sizeof(hdr));
+    vring_push(&s->vring, elem, sizeof(hdr));
     notify_guest(s);
 }
 
 /* Get disk serial number */
 static void do_get_id_cmd(VirtIOBlockDataPlane *s,
                           struct iovec *iov, unsigned int iov_cnt,
-                          unsigned int head, QEMUIOVector *inhdr)
+                          VirtQueueElement *elem, QEMUIOVector *inhdr)
 {
     char id[VIRTIO_BLK_ID_BYTES];
 
     /* Serial number not NUL-terminated when shorter than buffer */
     strncpy(id, s->blk->serial ? s->blk->serial : "", sizeof(id));
     iov_from_buf(iov, iov_cnt, 0, id, sizeof(id));
-    complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
+    complete_request_early(s, elem, inhdr, VIRTIO_BLK_S_OK);
 }
 
 static int do_rdwr_cmd(VirtIOBlockDataPlane *s, bool read,
-                       struct iovec *iov, unsigned int iov_cnt,
-                       long long offset, unsigned int head,
+                       struct iovec *iov, unsigned iov_cnt,
+                       long long offset, VirtQueueElement *elem,
                        QEMUIOVector *inhdr)
 {
     struct iocb *iocb;
@@ -186,19 +186,20 @@ static int do_rdwr_cmd(VirtIOBlockDataPlane *s, bool read,
 
     /* Fill in virtio block metadata needed for completion */
     VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
-    req->head = head;
+    req->elem = elem;
     req->inhdr = inhdr;
     req->bounce_iov = bounce_iov;
     req->read_qiov = read_qiov;
     return 0;
 }
 
-static int process_request(IOQueue *ioq, struct iovec iov[],
-                           unsigned int out_num, unsigned int in_num,
-                           unsigned int head)
+static int process_request(IOQueue *ioq, VirtQueueElement *elem)
 {
     VirtIOBlockDataPlane *s = container_of(ioq, VirtIOBlockDataPlane, ioqueue);
-    struct iovec *in_iov = &iov[out_num];
+    struct iovec *iov = elem->out_sg;
+    struct iovec *in_iov = elem->in_sg;
+    unsigned out_num = elem->out_num;
+    unsigned in_num = elem->in_num;
     struct virtio_blk_outhdr outhdr;
     QEMUIOVector *inhdr;
     size_t in_size;
@@ -229,29 +230,29 @@ static int process_request(IOQueue *ioq, struct iovec iov[],
 
     switch (outhdr.type) {
     case VIRTIO_BLK_T_IN:
-        do_rdwr_cmd(s, true, in_iov, in_num, outhdr.sector * 512, head, inhdr);
+        do_rdwr_cmd(s, true, in_iov, in_num, outhdr.sector * 512, elem, inhdr);
         return 0;
 
     case VIRTIO_BLK_T_OUT:
-        do_rdwr_cmd(s, false, iov, out_num, outhdr.sector * 512, head, inhdr);
+        do_rdwr_cmd(s, false, iov, out_num, outhdr.sector * 512, elem, inhdr);
         return 0;
 
     case VIRTIO_BLK_T_SCSI_CMD:
         /* TODO support SCSI commands */
-        complete_request_early(s, head, inhdr, VIRTIO_BLK_S_UNSUPP);
+        complete_request_early(s, elem, inhdr, VIRTIO_BLK_S_UNSUPP);
         return 0;
 
     case VIRTIO_BLK_T_FLUSH:
         /* TODO fdsync not supported by Linux AIO, do it synchronously here! */
         if (qemu_fdatasync(s->fd) < 0) {
-            complete_request_early(s, head, inhdr, VIRTIO_BLK_S_IOERR);
+            complete_request_early(s, elem, inhdr, VIRTIO_BLK_S_IOERR);
         } else {
-            complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
+            complete_request_early(s, elem, inhdr, VIRTIO_BLK_S_OK);
         }
         return 0;
 
     case VIRTIO_BLK_T_GET_ID:
-        do_get_id_cmd(s, in_iov, in_num, head, inhdr);
+        do_get_id_cmd(s, in_iov, in_num, elem, inhdr);
         return 0;
 
     default:
@@ -267,29 +268,8 @@ static void handle_notify(EventNotifier *e)
     VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane,
                                            host_notifier);
 
-    /* There is one array of iovecs into which all new requests are extracted
-     * from the vring.  Requests are read from the vring and the translated
-     * descriptors are written to the iovecs array.  The iovecs do not have to
-     * persist across handle_notify() calls because the kernel copies the
-     * iovecs on io_submit().
-     *
-     * Handling io_submit() EAGAIN may require storing the requests across
-     * handle_notify() calls until the kernel has sufficient resources to
-     * accept more I/O.  This is not implemented yet.
-     */
-    struct iovec iovec[VRING_MAX];
-    struct iovec *end = &iovec[VRING_MAX];
-    struct iovec *iov = iovec;
-
-    /* When a request is read from the vring, the index of the first descriptor
-     * (aka head) is returned so that the completed request can be pushed onto
-     * the vring later.
-     *
-     * The number of hypervisor read-only iovecs is out_num.  The number of
-     * hypervisor write-only iovecs is in_num.
-     */
-    int head;
-    unsigned int out_num = 0, in_num = 0;
+    VirtQueueElement *elem;
+    int ret;
     unsigned int num_queued;
 
     event_notifier_test_and_clear(&s->host_notifier);
@@ -298,29 +278,31 @@ static void handle_notify(EventNotifier *e)
         vring_disable_notification(s->vdev, &s->vring);
 
         for (;;) {
-            head = vring_pop(s->vdev, &s->vring, iov, end, &out_num, &in_num);
-            if (head < 0) {
+            ret = vring_pop(s->vdev, &s->vring, &elem);
+            if (ret < 0) {
+                assert(elem == NULL);
                 break; /* no more requests */
             }
 
-            trace_virtio_blk_data_plane_process_request(s, out_num, in_num,
-                                                        head);
+            trace_virtio_blk_data_plane_process_request(s, elem->out_num,
+                                                        elem->in_num, elem->index);
 
-            if (process_request(&s->ioqueue, iov, out_num, in_num, head) < 0) {
+            if (process_request(&s->ioqueue, elem) < 0) {
                 vring_set_broken(&s->vring);
+                vring_free_element(elem);
+                ret = -EFAULT;
                 break;
             }
-            iov += out_num + in_num;
         }
 
-        if (likely(head == -EAGAIN)) { /* vring emptied */
+        if (likely(ret == -EAGAIN)) { /* vring emptied */
             /* Re-enable guest->host notifies and stop processing the vring.
              * But if the guest has snuck in more descriptors, keep processing.
              */
             if (vring_enable_notification(s->vdev, &s->vring)) {
                 break;
             }
-        } else { /* head == -ENOBUFS or fatal error, iovecs[] is depleted */
+        } else { /* ret == -ENOBUFS or fatal error, iovecs[] is depleted */
             /* Since there are no iovecs[] left, stop processing for now.  Do
              * not re-enable guest->host notifies since the I/O completion
              * handler knows to check for more vring descriptors anyway.

@@ -1 +1 @@
-common-obj-y += hostmem.o vring.o
+common-obj-y += vring.o


@@ -1,183 +0,0 @@
/*
* Thread-safe guest to host memory mapping
*
* Copyright 2012 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Stefan Hajnoczi <stefanha@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "exec/address-spaces.h"
#include "hw/virtio/dataplane/hostmem.h"
static int hostmem_lookup_cmp(const void *phys_, const void *region_)
{
hwaddr phys = *(const hwaddr *)phys_;
const HostMemRegion *region = region_;
if (phys < region->guest_addr) {
return -1;
} else if (phys >= region->guest_addr + region->size) {
return 1;
} else {
return 0;
}
}
/**
* Map guest physical address to host pointer
*/
void *hostmem_lookup(HostMem *hostmem, hwaddr phys, hwaddr len, bool is_write)
{
HostMemRegion *region;
void *host_addr = NULL;
hwaddr offset_within_region;
qemu_mutex_lock(&hostmem->current_regions_lock);
region = bsearch(&phys, hostmem->current_regions,
hostmem->num_current_regions,
sizeof(hostmem->current_regions[0]),
hostmem_lookup_cmp);
if (!region) {
goto out;
}
if (is_write && region->readonly) {
goto out;
}
offset_within_region = phys - region->guest_addr;
if (len <= region->size - offset_within_region) {
host_addr = region->host_addr + offset_within_region;
}
out:
qemu_mutex_unlock(&hostmem->current_regions_lock);
return host_addr;
}
/**
* Install new regions list
*/
static void hostmem_listener_commit(MemoryListener *listener)
{
HostMem *hostmem = container_of(listener, HostMem, listener);
int i;
qemu_mutex_lock(&hostmem->current_regions_lock);
for (i = 0; i < hostmem->num_current_regions; i++) {
memory_region_unref(hostmem->current_regions[i].mr);
}
g_free(hostmem->current_regions);
hostmem->current_regions = hostmem->new_regions;
hostmem->num_current_regions = hostmem->num_new_regions;
qemu_mutex_unlock(&hostmem->current_regions_lock);
/* Reset new regions list */
hostmem->new_regions = NULL;
hostmem->num_new_regions = 0;
}
/**
* Add a MemoryRegionSection to the new regions list
*/
static void hostmem_append_new_region(HostMem *hostmem,
MemoryRegionSection *section)
{
void *ram_ptr = memory_region_get_ram_ptr(section->mr);
size_t num = hostmem->num_new_regions;
size_t new_size = (num + 1) * sizeof(hostmem->new_regions[0]);
hostmem->new_regions = g_realloc(hostmem->new_regions, new_size);
hostmem->new_regions[num] = (HostMemRegion){
.host_addr = ram_ptr + section->offset_within_region,
.guest_addr = section->offset_within_address_space,
.size = int128_get64(section->size),
.readonly = section->readonly,
.mr = section->mr,
};
hostmem->num_new_regions++;
memory_region_ref(section->mr);
}
static void hostmem_listener_append_region(MemoryListener *listener,
MemoryRegionSection *section)
{
HostMem *hostmem = container_of(listener, HostMem, listener);
/* Ignore non-RAM regions, we may not be able to map them */
if (!memory_region_is_ram(section->mr)) {
return;
}
/* Ignore regions with dirty logging, we cannot mark them dirty */
if (memory_region_is_logging(section->mr)) {
return;
}
hostmem_append_new_region(hostmem, section);
}
/* We don't implement most MemoryListener callbacks, use these nop stubs */
static void hostmem_listener_dummy(MemoryListener *listener)
{
}
static void hostmem_listener_section_dummy(MemoryListener *listener,
MemoryRegionSection *section)
{
}
static void hostmem_listener_eventfd_dummy(MemoryListener *listener,
MemoryRegionSection *section,
bool match_data, uint64_t data,
EventNotifier *e)
{
}
static void hostmem_listener_coalesced_mmio_dummy(MemoryListener *listener,
MemoryRegionSection *section,
hwaddr addr, hwaddr len)
{
}
void hostmem_init(HostMem *hostmem)
{
memset(hostmem, 0, sizeof(*hostmem));
qemu_mutex_init(&hostmem->current_regions_lock);
hostmem->listener = (MemoryListener){
.begin = hostmem_listener_dummy,
.commit = hostmem_listener_commit,
.region_add = hostmem_listener_append_region,
.region_del = hostmem_listener_section_dummy,
.region_nop = hostmem_listener_append_region,
.log_start = hostmem_listener_section_dummy,
.log_stop = hostmem_listener_section_dummy,
.log_sync = hostmem_listener_section_dummy,
.log_global_start = hostmem_listener_dummy,
.log_global_stop = hostmem_listener_dummy,
.eventfd_add = hostmem_listener_eventfd_dummy,
.eventfd_del = hostmem_listener_eventfd_dummy,
.coalesced_mmio_add = hostmem_listener_coalesced_mmio_dummy,
.coalesced_mmio_del = hostmem_listener_coalesced_mmio_dummy,
.priority = 10,
};
memory_listener_register(&hostmem->listener, &address_space_memory);
if (hostmem->num_new_regions > 0) {
hostmem_listener_commit(&hostmem->listener);
}
}
void hostmem_finalize(HostMem *hostmem)
{
memory_listener_unregister(&hostmem->listener);
g_free(hostmem->new_regions);
g_free(hostmem->current_regions);
qemu_mutex_destroy(&hostmem->current_regions_lock);
}


@@ -15,9 +15,53 @@
  */
 
 #include "trace.h"
+#include "hw/hw.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
 #include "hw/virtio/dataplane/vring.h"
 #include "qemu/error-report.h"
 
+/* vring_map can be coupled with vring_unmap or (if you still have the
+ * value returned in *mr) memory_region_unref.
+ */
+static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
+                       bool is_write)
+{
+    MemoryRegionSection section = memory_region_find(get_system_memory(), phys, len);
+    if (!section.mr || int128_get64(section.size) < len) {
+        goto out;
+    }
+    if (is_write && section.readonly) {
+        goto out;
+    }
+    if (!memory_region_is_ram(section.mr)) {
+        goto out;
+    }
+
+    /* Ignore regions with dirty logging, we cannot mark them dirty */
+    if (memory_region_is_logging(section.mr)) {
+        goto out;
+    }
+
+    *mr = section.mr;
+    return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;
+
+out:
+    memory_region_unref(section.mr);
+    *mr = NULL;
+    return NULL;
+}
+
+static void vring_unmap(void *buffer, bool is_write)
+{
+    ram_addr_t addr;
+    MemoryRegion *mr;
+
+    mr = qemu_ram_addr_from_host(buffer, &addr);
+    memory_region_unref(mr);
+}
+
 /* Map the guest's vring to host memory */
 bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
 {
@@ -27,8 +71,7 @@ bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
 
     vring->broken = false;
 
-    hostmem_init(&vring->hostmem);
-    vring_ptr = hostmem_lookup(&vring->hostmem, vring_addr, vring_size, true);
+    vring_ptr = vring_map(&vring->mr, vring_addr, vring_size, true);
     if (!vring_ptr) {
         error_report("Failed to map vring "
                      "addr %#" HWADDR_PRIx " size %" HWADDR_PRIu,
@@ -54,7 +97,7 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
     virtio_queue_set_last_avail_idx(vdev, n, vring->last_avail_idx);
     virtio_queue_invalidate_signalled_used(vdev, n);
 
-    hostmem_finalize(&vring->hostmem);
+    memory_region_unref(vring->mr);
 }
 
 /* Disable guest->host notifies */
@@ -110,14 +153,61 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
     return vring_need_event(vring_used_event(&vring->vr), new, old);
 }
 
+static int get_desc(Vring *vring, VirtQueueElement *elem,
+                    struct vring_desc *desc)
+{
+    unsigned *num;
+    struct iovec *iov;
+    hwaddr *addr;
+    MemoryRegion *mr;
+
+    if (desc->flags & VRING_DESC_F_WRITE) {
+        num = &elem->in_num;
+        iov = &elem->in_sg[*num];
+        addr = &elem->in_addr[*num];
+    } else {
+        num = &elem->out_num;
+        iov = &elem->out_sg[*num];
+        addr = &elem->out_addr[*num];
+
+        /* If it's an output descriptor, they're all supposed
+         * to come before any input descriptors. */
+        if (unlikely(elem->in_num)) {
+            error_report("Descriptor has out after in");
+            return -EFAULT;
+        }
+    }
+
+    /* Stop for now if there are not enough iovecs available. */
+    if (*num >= VIRTQUEUE_MAX_SIZE) {
+        return -ENOBUFS;
+    }
+
+    /* TODO handle non-contiguous memory across region boundaries */
+    iov->iov_base = vring_map(&mr, desc->addr, desc->len,
+                              desc->flags & VRING_DESC_F_WRITE);
+    if (!iov->iov_base) {
+        error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
+                     (uint64_t)desc->addr, desc->len);
+        return -EFAULT;
+    }
+
+    /* The MemoryRegion is looked up again and unref'ed later, leave the
+     * ref in place. */
+    iov->iov_len = desc->len;
+    *addr = desc->addr;
+    *num += 1;
+    return 0;
+}
+
 /* This is stolen from linux/drivers/vhost/vhost.c. */
-static int get_indirect(Vring *vring,
-                        struct iovec iov[], struct iovec *iov_end,
-                        unsigned int *out_num, unsigned int *in_num,
+static int get_indirect(Vring *vring, VirtQueueElement *elem,
                         struct vring_desc *indirect)
 {
     struct vring_desc desc;
     unsigned int i = 0, count, found = 0;
+    int ret;
 
     /* Sanity check */
     if (unlikely(indirect->len % sizeof(desc))) {
@@ -139,11 +229,12 @@ static int get_indirect(Vring *vring,
 
     do {
         struct vring_desc *desc_ptr;
+        MemoryRegion *mr;
 
         /* Translate indirect descriptor */
-        desc_ptr = hostmem_lookup(&vring->hostmem,
-                                  indirect->addr + found * sizeof(desc),
-                                  sizeof(desc), false);
+        desc_ptr = vring_map(&mr,
+                             indirect->addr + found * sizeof(desc),
+                             sizeof(desc), false);
         if (!desc_ptr) {
             error_report("Failed to map indirect descriptor "
                          "addr %#" PRIx64 " len %zu",
@@ -153,6 +244,7 @@ static int get_indirect(Vring *vring, VirtQueueElement *elem,
             return -EFAULT;
         }
         desc = *desc_ptr;
+        memory_region_unref(mr);
 
         /* Ensure descriptor has been loaded before accessing fields */
         barrier(); /* read_barrier_depends(); */
@@ -170,42 +262,35 @@ static int get_indirect(Vring *vring, VirtQueueElement *elem,
             return -EFAULT;
         }
 
-        /* Stop for now if there are not enough iovecs available. */
-        if (iov >= iov_end) {
-            return -ENOBUFS;
-        }
-
-        iov->iov_base = hostmem_lookup(&vring->hostmem, desc.addr, desc.len,
-                                       desc.flags & VRING_DESC_F_WRITE);
-        if (!iov->iov_base) {
-            error_report("Failed to map indirect descriptor"
-                         "addr %#" PRIx64 " len %u",
-                         (uint64_t)desc.addr, desc.len);
-            vring->broken = true;
-            return -EFAULT;
-        }
-        iov->iov_len = desc.len;
-        iov++;
-
-        /* If this is an input descriptor, increment that count. */
-        if (desc.flags & VRING_DESC_F_WRITE) {
-            *in_num += 1;
-        } else {
-            /* If it's an output descriptor, they're all supposed
-             * to come before any input descriptors. */
-            if (unlikely(*in_num)) {
-                error_report("Indirect descriptor "
-                             "has out after in: idx %u", i);
-                vring->broken = true;
-                return -EFAULT;
-            }
-            *out_num += 1;
+        ret = get_desc(vring, elem, &desc);
+        if (ret < 0) {
+            vring->broken |= (ret == -EFAULT);
+            return ret;
         }
 
         i = desc.next;
     } while (desc.flags & VRING_DESC_F_NEXT);
     return 0;
 }
 
+void vring_free_element(VirtQueueElement *elem)
+{
+    int i;
+
+    /* This assumes that the iovecs, if changed, are never moved past
+     * the end of the valid area.  This is true if iovec manipulations
+     * are done with iov_discard_front and iov_discard_back.
+     */
+    for (i = 0; i < elem->out_num; i++) {
+        vring_unmap(elem->out_sg[i].iov_base, false);
+    }
+
+    for (i = 0; i < elem->in_num; i++) {
+        vring_unmap(elem->in_sg[i].iov_base, true);
+    }
+
+    g_slice_free(VirtQueueElement, elem);
+}
+
 /* This looks in the virtqueue and for the first available buffer, and converts
  * it to an iovec for convenient access.  Since descriptors consist of some
  * number of output then some number of input descriptors, it's actually two
@@ -218,16 +303,18 @@ static int get_indirect(Vring *vring, VirtQueueElement *elem,
  *
  * Stolen from linux/drivers/vhost/vhost.c.
  */
 int vring_pop(VirtIODevice *vdev, Vring *vring,
-              struct iovec iov[], struct iovec *iov_end,
-              unsigned int *out_num, unsigned int *in_num)
+              VirtQueueElement **p_elem)
 {
     struct vring_desc desc;
     unsigned int i, head, found = 0, num = vring->vr.num;
     uint16_t avail_idx, last_avail_idx;
+    VirtQueueElement *elem = NULL;
+    int ret;
 
     /* If there was a fatal error then refuse operation */
     if (vring->broken) {
-        return -EFAULT;
+        ret = -EFAULT;
+        goto out;
     }
@@ -238,13 +325,14 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
     if (unlikely((uint16_t)(avail_idx - last_avail_idx) > num)) {
         error_report("Guest moved used index from %u to %u",
                      last_avail_idx, avail_idx);
-        vring->broken = true;
-        return -EFAULT;
+        ret = -EFAULT;
+        goto out;
     }
 
     /* If there's nothing new since last we looked. */
     if (avail_idx == last_avail_idx) {
-        return -EAGAIN;
+        ret = -EAGAIN;
+        goto out;
     }
 
     /* Only get avail ring entries after they have been exposed by guest. */
@@ -254,32 +342,33 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
      * the index we've seen. */
     head = vring->vr.avail->ring[last_avail_idx % num];
 
+    elem = g_slice_new(VirtQueueElement);
+    elem->index = head;
+    elem->in_num = elem->out_num = 0;
+
     /* If their number is silly, that's an error. */
     if (unlikely(head >= num)) {
         error_report("Guest says index %u > %u is available", head, num);
-        vring->broken = true;
-        return -EFAULT;
+        ret = -EFAULT;
+        goto out;
     }
 
     if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
         vring_avail_event(&vring->vr) = vring->vr.avail->idx;
     }
 
-    /* When we start there are none of either input nor output. */
-    *out_num = *in_num = 0;
-
     i = head;
     do {
         if (unlikely(i >= num)) {
             error_report("Desc index is %u > %u, head = %u", i, num, head);
-            vring->broken = true;
-            return -EFAULT;
+            ret = -EFAULT;
+            goto out;
         }
         if (unlikely(++found > num)) {
             error_report("Loop detected: last one at %u vq size %u head %u",
                          i, num, head);
-            vring->broken = true;
-            return -EFAULT;
+            ret = -EFAULT;
+            goto out;
         }
 
         desc = vring->vr.desc[i];
@@ -287,64 +376,50 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
         barrier();
 
         if (desc.flags & VRING_DESC_F_INDIRECT) {
-            int ret = get_indirect(vring, iov, iov_end, out_num, in_num, &desc);
+            int ret = get_indirect(vring, elem, &desc);
             if (ret < 0) {
-                return ret;
+                goto out;
             }
             continue;
         }
 
-        /* If there are not enough iovecs left, stop for now.  The caller
-         * should check if there are more descs available once they have dealt
-         * with the current set.
-         */
-        if (iov >= iov_end) {
-            return -ENOBUFS;
+        ret = get_desc(vring, elem, &desc);
+        if (ret < 0) {
+            goto out;
         }
 
-        /* TODO handle non-contiguous memory across region boundaries */
-        iov->iov_base = hostmem_lookup(&vring->hostmem, desc.addr, desc.len,
-                                       desc.flags & VRING_DESC_F_WRITE);
-        if (!iov->iov_base) {
-            error_report("Failed to map vring desc addr %#" PRIx64 " len %u",
-                         (uint64_t)desc.addr, desc.len);
-            vring->broken = true;
-            return -EFAULT;
-        }
-        iov->iov_len = desc.len;
-        iov++;
-
-        if (desc.flags & VRING_DESC_F_WRITE) {
-            /* If this is an input descriptor,
-             * increment that count. */
-            *in_num += 1;
-        } else {
-            /* If it's an output descriptor, they're all supposed
-             * to come before any input descriptors. */
-            if (unlikely(*in_num)) {
-                error_report("Descriptor has out after in: idx %d", i);
-                vring->broken = true;
-                return -EFAULT;
-            }
-            *out_num += 1;
-        }
-
         i = desc.next;
     } while (desc.flags & VRING_DESC_F_NEXT);
 
     /* On success, increment avail index. */
     vring->last_avail_idx++;
+    *p_elem = elem;
     return head;
+
+out:
+    assert(ret < 0);
+    if (ret == -EFAULT) {
+        vring->broken = true;
+    }
+    if (elem) {
+        vring_free_element(elem);
+    }
+    *p_elem = NULL;
+    return ret;
 }
 
 /* After we've used one of their buffers, we tell them about it.
  *
  * Stolen from linux/drivers/vhost/vhost.c.
  */
-void vring_push(Vring *vring, unsigned int head, int len)
+void vring_push(Vring *vring, VirtQueueElement *elem, int len)
 {
     struct vring_used_elem *used;
+    unsigned int head = elem->index;
     uint16_t new;
 
+    vring_free_element(elem);
+
     /* Don't touch vring if a fatal error occurred */
     if (vring->broken) {
         return;

@@ -394,8 +394,9 @@ void stream_start(BlockDriverState *bs, BlockDriverState *base,
 
 /**
  * commit_start:
- * @bs: Top Block device
- * @base: Block device that will be written into, and become the new top
+ * @bs: Active block device.
+ * @top: Top block device to be committed.
+ * @base: Block device that will be written into, and become the new top.
  * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
  * @on_error: The action to take upon error.
  * @cb: Completion function for the job.
@@ -407,7 +408,22 @@ void commit_start(BlockDriverState *bs, BlockDriverState *base,
                   BlockDriverState *top, int64_t speed,
                   BlockdevOnError on_error, BlockDriverCompletionFunc *cb,
                   void *opaque, Error **errp);
+/**
+ * commit_active_start:
+ * @bs: Active block device to be committed.
+ * @base: Block device that will be written into, and become the new top.
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @errp: Error object.
+ *
+ */
+void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
+                         int64_t speed,
+                         BlockdevOnError on_error,
+                         BlockDriverCompletionFunc *cb,
+                         void *opaque, Error **errp);
 
 /*
  * mirror_start:
 * @bs: Block device to operate on.


@@ -1,58 +0,0 @@
/*
* Thread-safe guest to host memory mapping
*
* Copyright 2012 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Stefan Hajnoczi <stefanha@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef HOSTMEM_H
#define HOSTMEM_H
#include "exec/memory.h"
#include "qemu/thread.h"
typedef struct {
MemoryRegion *mr;
void *host_addr;
hwaddr guest_addr;
uint64_t size;
bool readonly;
} HostMemRegion;
typedef struct {
/* The listener is invoked when regions change and a new list of regions is
* built up completely before they are installed.
*/
MemoryListener listener;
HostMemRegion *new_regions;
size_t num_new_regions;
/* Current regions are accessed from multiple threads either to lookup
* addresses or to install a new list of regions. The lock protects the
* pointer and the regions.
*/
QemuMutex current_regions_lock;
HostMemRegion *current_regions;
size_t num_current_regions;
} HostMem;
void hostmem_init(HostMem *hostmem);
void hostmem_finalize(HostMem *hostmem);
/**
* Map a guest physical address to a pointer
*
* Note that there is map/unmap mechanism here. The caller must ensure that
* mapped memory is no longer used across events like hot memory unplug. This
* can be done with other mechanisms like bdrv_drain_all() that quiesce
* in-flight I/O.
*/
void *hostmem_lookup(HostMem *hostmem, hwaddr phys, hwaddr len, bool is_write);
#endif /* HOSTMEM_H */


@@ -19,11 +19,10 @@
 #include <linux/virtio_ring.h>
 #include "qemu-common.h"
-#include "hostmem.h"
 #include "hw/virtio/virtio.h"
 
 typedef struct {
-    HostMem hostmem;                /* guest memory mapper */
+    MemoryRegion *mr;               /* memory region containing the vring */
     struct vring vr;                /* virtqueue vring mapped to host memory */
     uint16_t last_avail_idx;        /* last processed avail ring index */
     uint16_t last_used_idx;         /* last processed used ring index */
@@ -54,9 +53,8 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n);
 void vring_disable_notification(VirtIODevice *vdev, Vring *vring);
 bool vring_enable_notification(VirtIODevice *vdev, Vring *vring);
 bool vring_should_notify(VirtIODevice *vdev, Vring *vring);
-int vring_pop(VirtIODevice *vdev, Vring *vring,
-              struct iovec iov[], struct iovec *iov_end,
-              unsigned int *out_num, unsigned int *in_num);
-void vring_push(Vring *vring, unsigned int head, int len);
+int vring_pop(VirtIODevice *vdev, Vring *vring, VirtQueueElement **elem);
+void vring_push(Vring *vring, VirtQueueElement *elem, int len);
+void vring_free_element(VirtQueueElement *elem);
 
 #endif /* VRING_H */


@@ -1967,9 +1967,11 @@
 #
 # @top:  The file name of the backing image within the image chain,
 #        which contains the topmost data to be committed down.
-#        Note, the active layer as 'top' is currently unsupported.
 #
 #        If top == base, that is an error.
+#        If top == active, the job will not be completed by itself,
+#        user needs to complete the job with the block-job-complete
+#        command after getting the ready event. (Since 2.0)
 #
 #
 # @speed:  #optional the maximum speed, in bytes per second
@@ -1979,7 +1981,6 @@
 #          If @device does not exist, DeviceNotFound
 #          If image commit is not supported by this device, NotSupported
 #          If @base or @top is invalid, a generic error is returned
-#          If @top is the active layer, or omitted, a generic error is returned
 #          If @speed is invalid, InvalidParameter
 #
 # Since: 1.3
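
To make the new top == active contract concrete, the client-side flow looks like this; a minimal sketch in the style of the qemu-iotests VM wrapper (device and image names are hypothetical):

    # Issue the commit with the active image as 'top'.
    result = vm.qmp('block-commit', device='drive0', top=active_img)

    # The job converges and emits BLOCK_JOB_READY instead of completing on
    # its own; the user must then finish it with block-job-complete.
    for event in vm.get_qmp_events(wait=True):
        if event['event'] == 'BLOCK_JOB_READY':
            vm.qmp('block-job-complete', device='drive0')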
@@ -3022,7 +3023,7 @@
 #
 # @devname: #optional path of the netmap device (default: '/dev/netmap').
 #
-# Since 1.8
+# Since 2.0
 ##
 { 'type': 'NetdevNetmapOptions',
   'data': {


@@ -654,6 +654,21 @@ Supported options:
 Specifies which VHD subformat to use. Valid options are
 @code{dynamic} (default) and @code{fixed}.
 @end table
+
+@item VHDX
+Hyper-V compatible image format (VHDX).
+Supported options:
+@table @code
+@item subformat
+Specifies which VHDX subformat to use. Valid options are
+@code{dynamic} (default) and @code{fixed}.
+@item block_state_zero
+Force use of payload blocks of type 'ZERO'.
+@item block_size
+Block size; min 1 MB, max 256 MB. 0 means auto-calculate based on image size.
+@item log_size
+Log size; min 1 MB.
+@end table
 @end table
 
 @subsubsection Read-only formats
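For context, creating a VHDX image with the options documented above could
look like the following sketch; the file name and sizes are arbitrary, and
qemu-img is assumed to be on PATH:

    # Hypothetical example of the VHDX creation options documented above.
    import subprocess

    subprocess.check_call([
        'qemu-img', 'create', '-f', 'vhdx',
        # block_size=0 asks for auto-calculation based on image size
        '-o', 'subformat=dynamic,block_size=0,log_size=1M',
        'disk.vhdx', '10G',
    ])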

@@ -431,8 +431,8 @@ This option can only be enabled if @code{compat=1.1} is specified.
 @item Other
 QEMU also supports various other image file formats for compatibility with
-older QEMU versions or other hypervisors, including VMDK, VDI, VHD (vpc), qcow1
-and QED. For a full list of supported formats see @code{qemu-img --help}.
+older QEMU versions or other hypervisors, including VMDK, VDI, VHD (vpc), VHDX,
+qcow1 and QED. For a full list of supported formats see @code{qemu-img --help}.
 For a more detailed description of these formats, see the QEMU Emulation User
 Documentation.
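For instance, the format list of a given build can be read off
programmatically; a small sketch, assuming qemu-img is on PATH and that its
help text ends with a "Supported formats:" line as in current builds:

    # Hypothetical helper: extract the supported-formats line from qemu-img.
    import subprocess

    out = subprocess.run(['qemu-img', '--help'],
                         capture_output=True, text=True).stdout
    formats = [l for l in out.splitlines()
               if l.startswith('Supported formats:')]
    print(formats)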

@@ -39,6 +39,29 @@ class ImageCommitTestCase(iotests.QMPTestCase):
         result = self.vm.qmp('query-block-jobs')
         self.assert_qmp(result, 'return', [])
 
+    def run_commit_test(self, top, base):
+        self.assert_no_active_commit()
+        result = self.vm.qmp('block-commit', device='drive0', top=top, base=base)
+        self.assert_qmp(result, 'return', {})
+
+        completed = False
+        while not completed:
+            for event in self.vm.get_qmp_events(wait=True):
+                if event['event'] == 'BLOCK_JOB_COMPLETED':
+                    self.assert_qmp(event, 'data/type', 'commit')
+                    self.assert_qmp(event, 'data/device', 'drive0')
+                    self.assert_qmp(event, 'data/offset', self.image_len)
+                    self.assert_qmp(event, 'data/len', self.image_len)
+                    completed = True
+                elif event['event'] == 'BLOCK_JOB_READY':
+                    self.assert_qmp(event, 'data/type', 'commit')
+                    self.assert_qmp(event, 'data/device', 'drive0')
+                    self.assert_qmp(event, 'data/len', self.image_len)
+                    self.vm.qmp('block-job-complete', device='drive0')
+
+        self.assert_no_active_commit()
+        self.vm.shutdown()
+
 class TestSingleDrive(ImageCommitTestCase):
     image_len = 1 * 1024 * 1024
     test_len = 1 * 1024 * 256
@@ -59,23 +82,7 @@ class TestSingleDrive(ImageCommitTestCase):
         os.remove(backing_img)
 
     def test_commit(self):
-        self.assert_no_active_commit()
-        result = self.vm.qmp('block-commit', device='drive0', top='%s' % mid_img)
-        self.assert_qmp(result, 'return', {})
-
-        completed = False
-        while not completed:
-            for event in self.vm.get_qmp_events(wait=True):
-                if event['event'] == 'BLOCK_JOB_COMPLETED':
-                    self.assert_qmp(event, 'data/type', 'commit')
-                    self.assert_qmp(event, 'data/device', 'drive0')
-                    self.assert_qmp(event, 'data/offset', self.image_len)
-                    self.assert_qmp(event, 'data/len', self.image_len)
-                    completed = True
-
-        self.assert_no_active_commit()
-        self.vm.shutdown()
-
+        self.run_commit_test(mid_img, backing_img)
         self.assertEqual(-1, qemu_io('-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
         self.assertEqual(-1, qemu_io('-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed"))
 
@@ -102,10 +109,9 @@ class TestSingleDrive(ImageCommitTestCase):
         self.assert_qmp(result, 'error/desc', 'Base \'badfile\' not found')
 
     def test_top_is_active(self):
-        self.assert_no_active_commit()
-        result = self.vm.qmp('block-commit', device='drive0', top='%s' % test_img, base='%s' % backing_img)
-        self.assert_qmp(result, 'error/class', 'GenericError')
-        self.assert_qmp(result, 'error/desc', 'Top image as the active layer is currently unsupported')
+        self.run_commit_test(test_img, backing_img)
+        self.assertEqual(-1, qemu_io('-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
+        self.assertEqual(-1, qemu_io('-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed"))
 
     def test_top_and_base_reversed(self):
         self.assert_no_active_commit()
@@ -166,23 +172,7 @@ class TestRelativePaths(ImageCommitTestCase):
                 raise
 
     def test_commit(self):
-        self.assert_no_active_commit()
-        result = self.vm.qmp('block-commit', device='drive0', top='%s' % self.mid_img)
-        self.assert_qmp(result, 'return', {})
-
-        completed = False
-        while not completed:
-            for event in self.vm.get_qmp_events(wait=True):
-                if event['event'] == 'BLOCK_JOB_COMPLETED':
-                    self.assert_qmp(event, 'data/type', 'commit')
-                    self.assert_qmp(event, 'data/device', 'drive0')
-                    self.assert_qmp(event, 'data/offset', self.image_len)
-                    self.assert_qmp(event, 'data/len', self.image_len)
-                    completed = True
-
-        self.assert_no_active_commit()
-        self.vm.shutdown()
-
+        self.run_commit_test(self.mid_img, self.backing_img)
         self.assertEqual(-1, qemu_io('-c', 'read -P 0xab 0 524288', self.backing_img_abs).find("verification failed"))
         self.assertEqual(-1, qemu_io('-c', 'read -P 0xef 524288 524288', self.backing_img_abs).find("verification failed"))
 
@@ -209,10 +199,9 @@ class TestRelativePaths(ImageCommitTestCase):
         self.assert_qmp(result, 'error/desc', 'Base \'badfile\' not found')
 
     def test_top_is_active(self):
-        self.assert_no_active_commit()
-        result = self.vm.qmp('block-commit', device='drive0', top='%s' % self.test_img, base='%s' % self.backing_img)
-        self.assert_qmp(result, 'error/class', 'GenericError')
-        self.assert_qmp(result, 'error/desc', 'Top image as the active layer is currently unsupported')
+        self.run_commit_test(self.test_img, self.backing_img)
+        self.assertEqual(-1, qemu_io('-c', 'read -P 0xab 0 524288', self.backing_img_abs).find("verification failed"))
+        self.assertEqual(-1, qemu_io('-c', 'read -P 0xef 524288 524288', self.backing_img_abs).find("verification failed"))
 
     def test_top_and_base_reversed(self):
         self.assert_no_active_commit()
@@ -229,6 +218,7 @@ class TestSetSpeed(ImageCommitTestCase):
         qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % backing_img, mid_img)
         qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % mid_img, test_img)
         qemu_io('-c', 'write -P 0x1 0 512', test_img)
+        qemu_io('-c', 'write -P 0xef 524288 524288', mid_img)
         self.vm = iotests.VM().add_drive(test_img)
         self.vm.launch()

@@ -91,7 +91,6 @@ Testing: -drive if=virtio
 QEMU X.Y.Z monitor - type 'help' for more information
 (qemu) QEMU_PROG: -drive if=virtio: Device needs media, but drive is empty
 QEMU_PROG: -drive if=virtio: Device initialization failed.
-QEMU_PROG: -drive if=virtio: Device initialization failed.
 QEMU_PROG: -drive if=virtio: Device 'virtio-blk-pci' could not be initialized
 
 Testing: -drive if=scsi

@@ -80,6 +80,20 @@ echo "=== Testing big twoGbMaxExtentFlat ==="
 IMGOPTS="subformat=twoGbMaxExtentFlat" _make_test_img 1000G
 $QEMU_IMG info $TEST_IMG | _filter_testdir | sed -e 's/cid: [0-9]*/cid: XXXXXXXX/'
 
+echo
+echo "=== Testing malformed VMFS extent description line ==="
+cat >"$TEST_IMG" <<EOF
+# Disk DescriptorFile
+version=1
+CID=58ab4847
+parentCID=ffffffff
+createType="vmfs"
+
+# Extent description
+RW 12582912 VMFS "dummy.vmdk" 1
+EOF
+_img_info
+
 echo
 echo "=== Testing version 3 ==="
 _use_sample_img iotest-version3.vmdk.bz2
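The check exercised here rejects a VMFS extent description line carrying a
spurious fifth field after the file name. A rough Python equivalent of the
shell test above; the descriptor path is arbitrary and qemu-img is assumed
to be on PATH:

    # Hypothetical re-creation of the malformed-descriptor check in Python.
    import subprocess

    lines = [
        '# Disk DescriptorFile',
        'version=1',
        'CID=58ab4847',
        'parentCID=ffffffff',
        'createType="vmfs"',
        '',
        '# Extent description',
        'RW 12582912 VMFS "dummy.vmdk" 1',  # trailing "1" is the bad field
    ]
    with open('/tmp/malformed.vmdk', 'w') as f:
        f.write('\n'.join(lines) + '\n')

    # qemu-img must refuse the image, reporting invalid extent lines as in
    # the expected output below.
    rc = subprocess.call(['qemu-img', 'info', '/tmp/malformed.vmdk'])
    assert rc != 0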

@@ -2038,6 +2038,11 @@ Format specific information:
             filename: TEST_DIR/t-f500.vmdk
             format: FLAT
 
+=== Testing malformed VMFS extent description line ===
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT': Invalid extent lines:
+RW 12582912 VMFS "dummy.IMGFMT" 1
+
+
 === Testing version 3 ===
 image: TEST_DIR/iotest-version3.IMGFMT
 file format: IMGFMT