Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

pc, pci, virtio: cleanups, fixes

a bunch of bugfixes, and a couple of cleanups that make these fixes
easier and/or make debugging easier

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Fri 29 Jul 2016 04:11:01 BST
# gpg:                using RSA key 0x281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (41 commits)
  mptsas: Fix a migration compatible issue
  vhost: do not update last avail idx on get_vring_base() failure
  vhost: add vhost_net_set_backend()
  vhost-user: add error report in vhost_user_write()
  tests: fix vhost-user-test leak
  tests: plug some leaks in virtio-net-test
  vhost-user: wait until backend init is completed
  char: add and use tcp_chr_wait_connected
  char: add chr_wait_connected callback
  vhost: add assert() to check runtime behaviour
  vhost-net: vhost_migration_done is vhost-user specific
  Revert "vhost-net: do not crash if backend is not present"
  vhost-user: add get_vhost_net() assertions
  vhost-user: keep vhost_net after a disconnection
  vhost-user: check vhost_user_{read,write}() return value
  vhost-user: check qemu_chr_fe_set_msgfds() return value
  vhost-user: call set_msgfds unconditionally
  qemu-char: fix qemu_chr_fe_set_msgfds() crash when disconnected
  vhost: use error_report() instead of fprintf(stderr,...)
  vhost: add missing VHOST_OPS_DEBUG
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2016-07-29 11:57:01 +01:00
commit cbe81c6331
24 changed files with 486 additions and 252 deletions

View file

@@ -176,7 +176,7 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
int *fds, int fd_num)
{
CharDriverState *chr = dev->opaque;
int size = VHOST_USER_HDR_SIZE + msg->size;
int ret, size = VHOST_USER_HDR_SIZE + msg->size;
/*
* For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
@@ -187,12 +187,19 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
return 0;
}
if (fd_num) {
qemu_chr_fe_set_msgfds(chr, fds, fd_num);
if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
error_report("Failed to set msg fds.");
return -1;
}
return qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size) == size ?
0 : -1;
ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
if (ret != size) {
error_report("Failed to write msg."
" Wrote %d instead of %d.", ret, size);
return -1;
}
return 0;
}
static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
@@ -214,12 +221,14 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
fds[fd_num++] = log->fd;
}
vhost_user_write(dev, &msg, fds, fd_num);
if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
return -1;
}
if (shmfd) {
msg.size = 0;
if (vhost_user_read(dev, &msg) < 0) {
return 0;
return -1;
}
if (msg.request != VHOST_USER_SET_LOG_BASE) {
@@ -275,7 +284,9 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
msg.size += sizeof(msg.payload.memory.padding);
msg.size += fd_num * sizeof(VhostUserMemoryRegion);
vhost_user_write(dev, &msg, fds, fd_num);
if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
return -1;
}
return 0;
}
@@ -290,7 +301,9 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
.size = sizeof(msg.payload.addr),
};
vhost_user_write(dev, &msg, NULL, 0);
if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
return -1;
}
return 0;
}
@@ -313,7 +326,9 @@ static int vhost_set_vring(struct vhost_dev *dev,
.size = sizeof(msg.payload.state),
};
vhost_user_write(dev, &msg, NULL, 0);
if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
return -1;
}
return 0;
}
@@ -360,10 +375,12 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev,
.size = sizeof(msg.payload.state),
};
vhost_user_write(dev, &msg, NULL, 0);
if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
return -1;
}
if (vhost_user_read(dev, &msg) < 0) {
return 0;
return -1;
}
if (msg.request != VHOST_USER_GET_VRING_BASE) {
@@ -401,7 +418,9 @@ static int vhost_set_vring_file(struct vhost_dev *dev,
msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
}
vhost_user_write(dev, &msg, fds, fd_num);
if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
return -1;
}
return 0;
}
@@ -427,7 +446,9 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
.size = sizeof(msg.payload.u64),
};
vhost_user_write(dev, &msg, NULL, 0);
if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
return -1;
}
return 0;
}
@@ -455,10 +476,12 @@ static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
return 0;
}
vhost_user_write(dev, &msg, NULL, 0);
if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
return -1;
}
if (vhost_user_read(dev, &msg) < 0) {
return 0;
return -1;
}
if (msg.request != request) {
@@ -489,7 +512,9 @@ static int vhost_user_set_owner(struct vhost_dev *dev)
.flags = VHOST_USER_VERSION,
};
vhost_user_write(dev, &msg, NULL, 0);
if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
return -1;
}
return 0;
}
@@ -501,7 +526,9 @@ static int vhost_user_reset_device(struct vhost_dev *dev)
.flags = VHOST_USER_VERSION,
};
vhost_user_write(dev, &msg, NULL, 0);
if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
return -1;
}
return 0;
}
@@ -588,7 +615,6 @@ static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
{
VhostUserMsg msg = { 0 };
int err;
assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
@@ -605,8 +631,7 @@ static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
memcpy((char *)&msg.payload.u64, mac_addr, 6);
msg.size = sizeof(msg.payload.u64);
err = vhost_user_write(dev, &msg, NULL, 0);
return err;
return vhost_user_write(dev, &msg, NULL, 0);
}
return -1;
}
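Taken together, the vhost-user hunks above converge on one calling convention: vhost_user_write() now fails loudly when qemu_chr_fe_set_msgfds() fails or when qemu_chr_fe_write_all() returns a short count, and every caller propagates the resulting -1 instead of ignoring it. What follows is only a minimal standalone sketch of that shape, with hypothetical stand-in types and stubbed chardev helpers rather than QEMU's real ones:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the QEMU types used above; not QEMU code. */
typedef struct {
    uint32_t size;
    uint8_t payload[64];
} Msg;

enum { HDR_SIZE = 12 };

/* Stubs in place of qemu_chr_fe_set_msgfds()/qemu_chr_fe_write_all(). */
static int chr_set_msgfds(int *fds, int fd_num)
{
    (void)fds;
    return fd_num >= 0 ? 0 : -1;
}

static int chr_write_all(const uint8_t *buf, int len)
{
    (void)buf;
    return len;            /* pretend the full message was written */
}

/* Same shape as the reworked vhost_user_write(): report and return -1
 * instead of silently dropping chardev errors or short writes. */
static int msg_write(Msg *msg, int *fds, int fd_num)
{
    int ret, size = HDR_SIZE + msg->size;

    if (fd_num && chr_set_msgfds(fds, fd_num) < 0) {
        fprintf(stderr, "Failed to set msg fds.\n");
        return -1;
    }
    ret = chr_write_all((const uint8_t *)msg, size);
    if (ret != size) {
        fprintf(stderr, "Failed to write msg. Wrote %d instead of %d.\n",
                ret, size);
        return -1;
    }
    return 0;
}

/* Callers propagate the failure, as the set_owner/set_vring hunks now do. */
static int set_owner(void)
{
    Msg msg = { .size = 0 };

    if (msg_write(&msg, NULL, 0) < 0) {
        return -1;
    }
    return 0;
}

int main(void)
{
    return set_owner() == 0 ? 0 : 1;
}

The point of the pattern is that a failed or disconnected backend surfaces as an error at the request that hit it, which is what the disconnect-handling commits in this series build on.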

View file

@@ -27,6 +27,18 @@
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"
/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1
#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
do { } while (0)
#endif
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;
@@ -362,6 +374,8 @@ static void vhost_log_put(struct vhost_dev *dev, bool sync)
if (!log) {
return;
}
dev->log = NULL;
dev->log_size = 0;
--log->refcnt;
if (log->refcnt == 0) {
@@ -398,7 +412,10 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
/* inform backend of log switching, this must be done before
releasing the current log, to ensure no logging is lost */
r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
assert(r >= 0);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_log_base failed");
}
vhost_log_put(dev, true);
dev->log = log;
dev->log_size = size;
@@ -422,11 +439,11 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
l = vq->ring_size;
p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
if (!p || l != vq->ring_size) {
fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
error_report("Unable to map ring buffer for ring %d", i);
r = -ENOMEM;
}
if (p != vq->ring) {
fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
error_report("Ring buffer relocated for ring %d", i);
r = -EBUSY;
}
cpu_physical_memory_unmap(p, l, 0, 0);
@@ -565,7 +582,9 @@ static void vhost_commit(MemoryListener *listener)
if (!dev->log_enabled) {
r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
assert(r >= 0);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_mem_table failed");
}
dev->memory_changed = false;
return;
}
@@ -578,7 +597,9 @@ static void vhost_commit(MemoryListener *listener)
vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
}
r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
assert(r >= 0);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_mem_table failed");
}
/* To log less, can only decrease log size after table update. */
if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
vhost_dev_log_resize(dev, log_size);
@@ -647,6 +668,7 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
};
int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
return -errno;
}
return 0;
@@ -660,12 +682,15 @@ static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
features |= 0x1ULL << VHOST_F_LOG_ALL;
}
r = dev->vhost_ops->vhost_set_features(dev, features);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_features failed");
}
return r < 0 ? -errno : 0;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
int r, t, i, idx;
int r, i, idx;
r = vhost_dev_set_features(dev, enable_log);
if (r < 0) {
goto err_features;
@@ -682,12 +707,10 @@ static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
err_vq:
for (; i >= 0; --i) {
idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
dev->log_enabled);
assert(t >= 0);
vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
dev->log_enabled);
}
t = vhost_dev_set_features(dev, dev->log_enabled);
assert(t >= 0);
vhost_dev_set_features(dev, dev->log_enabled);
err_features:
return r;
}
@@ -710,8 +733,6 @@ static int vhost_migration_log(MemoryListener *listener, int enable)
return r;
}
vhost_log_put(dev, false);
dev->log = NULL;
dev->log_size = 0;
} else {
vhost_dev_log_resize(dev, vhost_get_log_size(dev));
r = vhost_dev_set_log(dev, true);
@@ -787,6 +808,7 @@ static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
return 0;
}
VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
if (errno == ENOTTY) {
error_report("vhost does not support cross-endian");
return -ENOSYS;
@@ -815,12 +837,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
vq->num = state.num = virtio_queue_get_num(vdev, idx);
r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
if (r) {
VHOST_OPS_DEBUG("vhost_set_vring_num failed");
return -errno;
}
state.num = virtio_queue_get_last_avail_idx(vdev, idx);
r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
if (r) {
VHOST_OPS_DEBUG("vhost_set_vring_base failed");
return -errno;
}
@@ -872,6 +896,7 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
if (r) {
VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
r = -errno;
goto fail_kick;
}
@@ -919,25 +944,21 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
if (r < 0) {
fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
fflush(stderr);
VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
} else {
virtio_queue_set_last_avail_idx(vdev, idx, state.num);
}
virtio_queue_set_last_avail_idx(vdev, idx, state.num);
virtio_queue_invalidate_signalled_used(vdev, idx);
/* In the cross-endian case, we need to reset the vring endianness to
* native as legacy devices expect so by default.
*/
if (vhost_needs_vring_endian(vdev)) {
r = vhost_virtqueue_set_vring_endian_legacy(dev,
!virtio_is_big_endian(vdev),
vhost_vq_index);
if (r < 0) {
error_report("failed to reset vring endianness");
}
vhost_virtqueue_set_vring_endian_legacy(dev,
!virtio_is_big_endian(vdev),
vhost_vq_index);
}
assert (r >= 0);
cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
0, virtio_queue_get_ring_size(vdev, idx));
cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
@@ -976,6 +997,7 @@ static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
if (r) {
VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
return r;
}
@@ -997,6 +1019,7 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
file.fd = event_notifier_get_fd(&vq->masked_notifier);
r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
if (r) {
VHOST_OPS_DEBUG("vhost_set_vring_call failed");
r = -errno;
goto fail_call;
}
@@ -1015,42 +1038,41 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
VhostBackendType backend_type, uint32_t busyloop_timeout)
{
uint64_t features;
int i, r;
int i, r, n_initialized_vqs = 0;
hdev->migration_blocker = NULL;
if (vhost_set_backend_type(hdev, backend_type) < 0) {
close((uintptr_t)opaque);
return -1;
}
r = vhost_set_backend_type(hdev, backend_type);
assert(r >= 0);
if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
close((uintptr_t)opaque);
return -errno;
r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
if (r < 0) {
goto fail;
}
if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
fprintf(stderr, "vhost backend memory slots limit is less"
" than current number of present memory slots\n");
close((uintptr_t)opaque);
return -1;
error_report("vhost backend memory slots limit is less"
" than current number of present memory slots");
r = -1;
goto fail;
}
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
r = hdev->vhost_ops->vhost_set_owner(hdev);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_owner failed");
goto fail;
}
r = hdev->vhost_ops->vhost_get_features(hdev, &features);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_get_features failed");
goto fail;
}
for (i = 0; i < hdev->nvqs; ++i) {
for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
if (r < 0) {
goto fail_vq;
goto fail;
}
}
@@ -1105,38 +1127,43 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
hdev->started = false;
hdev->memory_changed = false;
memory_listener_register(&hdev->memory_listener, &address_space_memory);
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
return 0;
fail_busyloop:
while (--i >= 0) {
vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
}
i = hdev->nvqs;
fail_vq:
while (--i >= 0) {
vhost_virtqueue_cleanup(hdev->vqs + i);
}
fail:
r = -errno;
hdev->vhost_ops->vhost_backend_cleanup(hdev);
QLIST_REMOVE(hdev, entry);
hdev->nvqs = n_initialized_vqs;
vhost_dev_cleanup(hdev);
return r;
}
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
int i;
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_cleanup(hdev->vqs + i);
}
memory_listener_unregister(&hdev->memory_listener);
if (hdev->mem) {
/* those are only safe after successful init */
memory_listener_unregister(&hdev->memory_listener);
QLIST_REMOVE(hdev, entry);
}
if (hdev->migration_blocker) {
migrate_del_blocker(hdev->migration_blocker);
error_free(hdev->migration_blocker);
}
g_free(hdev->mem);
g_free(hdev->mem_sections);
hdev->vhost_ops->vhost_backend_cleanup(hdev);
QLIST_REMOVE(hdev, entry);
if (hdev->vhost_ops) {
hdev->vhost_ops->vhost_backend_cleanup(hdev);
}
assert(!hdev->log);
memset(hdev, 0, sizeof(struct vhost_dev));
}
/* Stop processing guest IO notifications in qemu.
@@ -1148,8 +1175,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int i, r, e;
if (!k->ioeventfd_started) {
fprintf(stderr, "binding does not support host notifiers\n");
error_report("binding does not support host notifiers");
r = -ENOSYS;
goto fail;
}
@@ -1158,7 +1186,7 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
true);
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
error_report("vhost VQ %d notifier binding failed: %d", i, -r);
goto fail_vq;
}
}
@@ -1169,8 +1197,7 @@ fail_vq:
e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
false);
if (e < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
fflush(stderr);
error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
}
assert (e >= 0);
}
@@ -1192,8 +1219,7 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
false);
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
fflush(stderr);
error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
}
assert (r >= 0);
}
@@ -1217,6 +1243,9 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
int r, index = n - hdev->vq_index;
struct vhost_vring_file file;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
if (mask) {
assert(vdev->use_guest_notifier_mask);
file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
@@ -1226,7 +1255,9 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
assert(r >= 0);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_vring_call failed");
}
}
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
@@ -1261,6 +1292,9 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
int i, r;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
hdev->started = true;
r = vhost_dev_set_features(hdev, hdev->log_enabled);
@@ -1269,6 +1303,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
}
r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_mem_table failed");
r = -errno;
goto fail_mem;
}
@@ -1293,6 +1328,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
hdev->log_size ? log_base : 0,
hdev->log);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_log_base failed");
r = -errno;
goto fail_log;
}
@@ -1321,6 +1357,9 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
int i;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_stop(hdev,
vdev,
@@ -1330,7 +1369,14 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
vhost_log_put(hdev, true);
hdev->started = false;
hdev->log = NULL;
hdev->log_size = 0;
}
int vhost_net_set_backend(struct vhost_dev *hdev,
struct vhost_vring_file *file)
{
if (hdev->vhost_ops->vhost_net_set_backend) {
return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
}
return -1;
}
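The VHOST_OPS_DEBUG() macro added at the top of this file relies on string-literal concatenation: the caller's format string is glued to ": %s (%d)" at compile time, strerror(errno) and errno are appended after any caller-supplied arguments, and the whole thing compiles to an empty statement when _VHOST_DEBUG is not defined. A small self-contained sketch of the same construction, substituting fprintf() for QEMU's error_report() purely so it builds on its own:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { fprintf(stderr, fmt ": %s (%d)\n", ## __VA_ARGS__, \
                 strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

int main(void)
{
    errno = ENOMEM;
    /* Expands to: fprintf(stderr, "vhost_set_mem_table failed" ": %s (%d)\n",
     *             strerror(errno), errno); */
    VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    return 0;
}

Note that ", ## __VA_ARGS__" (swallowing the comma when no extra arguments are passed) is a GNU extension, which is fine for code built with GCC or Clang.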

View file

@@ -161,7 +161,7 @@ static bool virtio_pci_modern_state_needed(void *opaque)
{
VirtIOPCIProxy *proxy = opaque;
return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
return virtio_pci_modern(proxy);
}
static const VMStateDescription vmstate_virtio_pci_modern_state = {
@@ -300,8 +300,8 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtQueue *vq = virtio_get_queue(vdev, n);
bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
bool legacy = virtio_pci_legacy(proxy);
bool modern = virtio_pci_modern(proxy);
bool fast_mmio = kvm_ioeventfd_any_length_enabled();
bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
MemoryRegion *modern_mr = &proxy->notify.mr;
@@ -1574,8 +1574,8 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
VirtioBusState *bus = &proxy->bus;
bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
bool legacy = virtio_pci_legacy(proxy);
bool modern = virtio_pci_modern(proxy);
bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
uint8_t *config;
uint32_t size;
@@ -1694,7 +1694,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
static void virtio_pci_device_unplugged(DeviceState *d)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
bool modern = virtio_pci_modern(proxy);
bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
virtio_pci_stop_ioeventfd(proxy);
@@ -1714,6 +1714,8 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
bool pcie_port = pci_bus_is_express(pci_dev->bus) &&
!pci_bus_is_root(pci_dev->bus);
/*
* virtio pci bar layout used by default.
@@ -1764,8 +1766,11 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
address_space_init(&proxy->modern_as, &proxy->modern_cfg, "virtio-pci-cfg-as");
if (pci_is_express(pci_dev) && pci_bus_is_express(pci_dev->bus) &&
!pci_bus_is_root(pci_dev->bus)) {
if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}
if (pcie_port && pci_is_express(pci_dev)) {
int pos;
pos = pcie_endpoint_cap_init(pci_dev, 0);
@@ -1819,10 +1824,9 @@ static void virtio_pci_reset(DeviceState *qdev)
static Property virtio_pci_properties[] = {
DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
DEFINE_PROP_BIT("disable-legacy", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
@@ -1839,7 +1843,7 @@ static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
PCIDevice *pci_dev = &proxy->pci_dev;
if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN)) {
virtio_pci_modern(proxy)) {
pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
@@ -2301,9 +2305,7 @@ static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
DeviceState *vdev = DEVICE(&vinput->vdev);
qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
/* force virtio-1.0 */
vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
virtio_pci_force_virtio_1(vpci_dev);
object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}
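The property rework above turns disable-modern into a plain bool and disable-legacy into an OnOffAuto tri-state that realize resolves from the bus type: on a PCIe (non-root) port an unset property becomes ON (modern-only), elsewhere it becomes OFF. A tiny standalone sketch of that resolution, using a local stand-in enum and a hypothetical resolve_disable_legacy() helper rather than QEMU's QAPI type:

#include <stdbool.h>
#include <stdio.h>

/* Local stand-in for QEMU's OnOffAuto; only the three values matter here. */
typedef enum {
    ON_OFF_AUTO_AUTO,
    ON_OFF_AUTO_ON,
    ON_OFF_AUTO_OFF,
} OnOffAuto;

/* Mirrors the virtio_pci_realize() hunk: AUTO resolves from whether the
 * device sits on a PCIe (non-root) port. */
static OnOffAuto resolve_disable_legacy(OnOffAuto prop, bool pcie_port)
{
    if (prop == ON_OFF_AUTO_AUTO) {
        return pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    return prop;
}

int main(void)
{
    /* Legacy stays available on conventional PCI and is dropped on PCIe,
     * matching virtio_pci_legacy() in the header hunk below. */
    printf("PCIe port:        disable-legacy=%s\n",
           resolve_disable_legacy(ON_OFF_AUTO_AUTO, true) == ON_OFF_AUTO_ON
           ? "on" : "off");
    printf("conventional PCI: disable-legacy=%s\n",
           resolve_disable_legacy(ON_OFF_AUTO_AUTO, false) == ON_OFF_AUTO_ON
           ? "on" : "off");
    return 0;
}

An explicitly set property is left alone, so users can still force legacy on a PCIe port or disable it on conventional PCI.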

View file

@@ -61,8 +61,6 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
enum {
VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT,
VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT,
VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT,
VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT,
VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
@@ -77,8 +75,6 @@ enum {
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
/* virtio version flags */
#define VIRTIO_PCI_FLAG_DISABLE_LEGACY (1 << VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT)
#define VIRTIO_PCI_FLAG_DISABLE_MODERN (1 << VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT)
#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT)
/* migrate extra state */
@@ -144,6 +140,8 @@ struct VirtIOPCIProxy {
uint32_t modern_mem_bar;
int config_cap;
uint32_t flags;
bool disable_modern;
OnOffAuto disable_legacy;
uint32_t class_code;
uint32_t nvectors;
uint32_t dfselect;
@@ -158,6 +156,21 @@ struct VirtIOPCIProxy {
VirtioBusState bus;
};
static inline bool virtio_pci_modern(VirtIOPCIProxy *proxy)
{
return !proxy->disable_modern;
}
static inline bool virtio_pci_legacy(VirtIOPCIProxy *proxy)
{
return proxy->disable_legacy == ON_OFF_AUTO_OFF;
}
static inline void virtio_pci_force_virtio_1(VirtIOPCIProxy *proxy)
{
proxy->disable_modern = false;
proxy->disable_legacy = ON_OFF_AUTO_ON;
}
/*
* virtio-scsi-pci: This extends VirtioPCIProxy.

View file

@@ -458,6 +458,11 @@ static void virtqueue_map_desc(unsigned int *p_num_sg, hwaddr *addr, struct iove
unsigned num_sg = *p_num_sg;
assert(num_sg <= max_num_sg);
if (!sz) {
error_report("virtio: zero sized buffers are not allowed");
exit(1);
}
while (sz) {
hwaddr len = sz;