vhost: multiqueue support

This patch lets vhost support multiqueue. The idea is simple: launch
multiple vhost threads and let each thread process a subset of the
device's virtqueues. After this change, each emulated device can have
multiple vhost threads as its backend.
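
For a device with N queue pairs this gives the layout sketched below
(illustration only, not code from this patch; the i * 2 stride matches
the new vhost_net_start() in the diff):

    /* Illustrative mapping, assuming one rx and one tx virtqueue per
     * queue pair: vhost_net instance i owns guest virtqueues i * 2
     * and i * 2 + 1, and runs its own vhost kernel thread. */
    static int first_vq_of_instance(int i)
    {
        return i * 2;   /* rx = i * 2, tx = i * 2 + 1 */
    }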

To do this, a virtqueue index is introduced to record the first
virtqueue that will be handled by a given vhost_net device. Based on
this index and nvqs, vhost can calculate the relative index of each
virtqueue when setting up the vhost_net device.
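
The translation from an absolute (guest-visible) virtqueue index to a
per-device one is then simple arithmetic. A minimal sketch, with a
hypothetical helper name, of the vq_index/nvqs bookkeeping described
above:

    /* Hypothetical helper: map an absolute guest virtqueue index to
     * an index relative to one vhost device, which owns the nvqs
     * virtqueues starting at vq_index. */
    static int vhost_relative_idx(struct vhost_dev *dev, int abs_idx)
    {
        assert(abs_idx >= dev->vq_index &&
               abs_idx < dev->vq_index + dev->nvqs);
        return abs_idx - dev->vq_index;
    }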

Since a virtio-net device may now be backed by many vhost/vhost_net
devices, the setting of guest notifiers is moved out of the
starting/stopping of a specific vhost thread: vhost_net_{start|stop}()
are renamed to vhost_net_{start|stop}_one(), and new
vhost_net_{start|stop}() functions are introduced to configure the
guest notifiers and to start/stop all vhost/vhost_net devices.
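
A caller now brings all backends up and down with one pair of calls.
A usage sketch against the new prototypes (error handling elided;
vdev, ncs and total_queues stand for the device, its peers and its
queue-pair count):

    /* Guest notifiers for all total_queues * 2 virtqueues are
     * configured once inside these calls, not per vhost thread. */
    if (vhost_net_start(vdev, ncs, total_queues) < 0) {
        /* fall back to userspace virtio emulation */
    }
    /* ... device runs ... */
    vhost_net_stop(vdev, ncs, total_queues);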

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
---
 5 files changed, 120 insertions(+), 58 deletions(-)

diff --git a/hw/vhost_net.c b/hw/vhost_net.c
--- a/hw/vhost_net.c
+++ b/hw/vhost_net.c
@@ -140,12 +140,21 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
     return vhost_dev_query(&net->dev, dev);
 }
 
-int vhost_net_start(struct vhost_net *net,
-                    VirtIODevice *dev)
+static int vhost_net_start_one(struct vhost_net *net,
+                               VirtIODevice *dev,
+                               int vq_index)
 {
     struct vhost_vring_file file = { };
     int r;
 
+    if (net->dev.started) {
+        return 0;
+    }
+
+    net->dev.nvqs = 2;
+    net->dev.vqs = net->vqs;
+    net->dev.vq_index = vq_index;
+
     r = vhost_dev_enable_notifiers(&net->dev, dev);
     if (r < 0) {
         goto fail_notifiers;
@@ -181,11 +190,15 @@ fail_notifiers:
     return r;
 }
 
-void vhost_net_stop(struct vhost_net *net,
-                    VirtIODevice *dev)
+static void vhost_net_stop_one(struct vhost_net *net,
+                               VirtIODevice *dev)
 {
     struct vhost_vring_file file = { .fd = -1 };
 
+    if (!net->dev.started) {
+        return;
+    }
+
     for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
         int r = ioctl(net->dev.control, VHOST_NET_SET_BACKEND, &file);
         assert(r >= 0);
@@ -195,6 +208,61 @@ void vhost_net_stop(struct vhost_net *net,
     vhost_dev_disable_notifiers(&net->dev, dev);
 }
 
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+                    int total_queues)
+{
+    int r, i = 0;
+
+    if (!dev->binding->set_guest_notifiers) {
+        error_report("binding does not support guest notifiers\n");
+        r = -ENOSYS;
+        goto err;
+    }
+
+    for (i = 0; i < total_queues; i++) {
+        r = vhost_net_start_one(tap_get_vhost_net(ncs[i].peer), dev, i * 2);
+
+        if (r < 0) {
+            goto err;
+        }
+    }
+
+    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
+                                          total_queues * 2,
+                                          true);
+    if (r < 0) {
+        error_report("Error binding guest notifier: %d\n", -r);
+        goto err;
+    }
+
+    return 0;
+
+err:
+    while (--i >= 0) {
+        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
+    }
+    return r;
+}
+
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+                    int total_queues)
+{
+    int i, r;
+
+    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
+                                          total_queues * 2,
+                                          false);
+    if (r < 0) {
+        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
+        fflush(stderr);
+    }
+    assert(r >= 0);
+
+    for (i = 0; i < total_queues; i++) {
+        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
+    }
+}
+
 void vhost_net_cleanup(struct vhost_net *net)
 {
     vhost_dev_cleanup(&net->dev);
@@ -224,13 +292,15 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
     return false;
 }
 
-int vhost_net_start(struct vhost_net *net,
-                    VirtIODevice *dev)
+int vhost_net_start(VirtIODevice *dev,
+                    NetClientState *ncs,
+                    int total_queues)
 {
     return -ENOSYS;
 }
 
-void vhost_net_stop(struct vhost_net *net,
-                    VirtIODevice *dev)
+void vhost_net_stop(VirtIODevice *dev,
+                    NetClientState *ncs,
+                    int total_queues)
 {
 }