Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pc,pci: features, cleanups

infrastructure for vhost-vdpa shadow work
piix south bridge rework
reconnect for vhost-user-scsi
dummy ACPI QTG DSM for cxl

tests, cleanups, fixes all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmU06PMPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpNIsH/0DlKti86VZLJ6PbNqsnKxoK2gg05TbEhPZU
# pQ+RPDaCHpFBsLC5qsoMJwvaEQFe0e49ZFemw7bXRzBxgmbbNnZ9ArCIPqT+rvQd
# 7UBmyC+kacVyybZatq69aK2BHKFtiIRlT78d9Izgtjmp8V7oyKoz14Esh8wkE+FT
# ypHUa70Addi6alNm6BVkm7bxZxi0Wrmf3THqF8ViYvufzHKl7JR5e17fKWEG0BqV
# 9W7AeHMnzJ7jkTvBGUw7g5EbzFn7hPLTbO4G/VW97k0puS4WRX5aIMkVhUazsRIa
# zDOuXCCskUWuRapiCwY0E4g7cCaT8/JR6JjjBaTgkjJgvo5Y8Eg=
# =ILek
# -----END PGP SIGNATURE-----
# gpg: Signature made Sun 22 Oct 2023 02:18:43 PDT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (62 commits)
  intel-iommu: Report interrupt remapping faults, fix return value
  MAINTAINERS: Add include/hw/intc/i8259.h to the PC chip section
  vhost-user: Fix protocol feature bit conflict
  tests/acpi: Update DSDT.cxl with QTG DSM
  hw/cxl: Add QTG _DSM support for ACPI0017 device
  tests/acpi: Allow update of DSDT.cxl
  hw/i386/cxl: ensure maxram is greater than ram size for calculating cxl range
  vhost-user: fix lost reconnect
  vhost-user-scsi: start vhost when guest kicks
  vhost-user-scsi: support reconnect to backend
  vhost: move and rename the conn retry times
  vhost-user-common: send get_inflight_fd once
  hw/i386/pc_piix: Make PIIX4 south bridge usable in PC machine
  hw/isa/piix: Implement multi-process QEMU support also for PIIX4
  hw/isa/piix: Resolve duplicate code regarding PCI interrupt wiring
  hw/isa/piix: Reuse PIIX3's PCI interrupt triggering in PIIX4
  hw/isa/piix: Rename functions to be shared for PCI interrupt triggering
  hw/isa/piix: Reuse PIIX3 base class' realize method in PIIX4
  hw/isa/piix: Share PIIX3's base class with PIIX4
  hw/isa/piix: Harmonize names of reset control memory regions
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 1b4a5a20da
Stefan Hajnoczi <stefanha@redhat.com>, 2023-10-23 14:45:29 -07:00
43 changed files with 1218 additions and 836 deletions

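Of the 43 changed files, the diff shown below is net/vhost-vdpa.c. It replaces the old send-one-CVQ-command-then-poll flow with a batched one: an out cursor and an in cursor walk a pair of shared buffers, each vhost_vdpa_net_load_*() helper packs its command at the cursor, and a single vhost_vdpa_net_svq_flush() at the end polls the shadow virtqueue and checks the one-byte ack of every queued command. Below is a minimal, self-contained sketch of that cursor idea; the buffer sizes, names, and the queue_cmd()/flush() helpers are illustrative, not QEMU's actual code.

/*
 * Toy model of the cursor-based batching the patch introduces.
 * Illustrative only: real submissions go through the shadow virtqueue.
 */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static unsigned char out_buf[4096]; /* stands in for s->cvq_cmd_out_buffer */
static unsigned char status[4096];  /* stands in for s->status */

static void cursor_reset(struct iovec *out_cur, struct iovec *in_cur)
{
    out_cur->iov_base = out_buf;
    out_cur->iov_len = sizeof(out_buf);
    in_cur->iov_base = status;
    in_cur->iov_len = sizeof(status);
}

/* Pack one command at the out cursor, reserve one ack byte at the in
 * cursor, and advance both; a real queue submission would go here. */
static int queue_cmd(struct iovec *out_cur, struct iovec *in_cur,
                     const void *cmd, size_t len)
{
    if (out_cur->iov_len < len || in_cur->iov_len < 1) {
        return -1; /* buffers full: the caller must flush first */
    }
    memcpy(out_cur->iov_base, cmd, len);
    out_cur->iov_base = (unsigned char *)out_cur->iov_base + len;
    out_cur->iov_len -= len;
    in_cur->iov_base = (unsigned char *)in_cur->iov_base + 1;
    in_cur->iov_len -= 1;
    return 0;
}

/* Like vhost_vdpa_net_svq_flush(): the distance the in cursor has
 * travelled from the start of the status buffer is the number of
 * pending acks; check each one. */
static int flush(const struct iovec *in_cur)
{
    size_t pending = (unsigned char *)in_cur->iov_base - status;
    for (size_t i = 0; i < pending; i++) {
        if (status[i] != 0 /* VIRTIO_NET_OK */) {
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    struct iovec out_cur, in_cur;
    cursor_reset(&out_cur, &in_cur);
    queue_cmd(&out_cur, &in_cur, "cmd1", 4);
    queue_cmd(&out_cur, &in_cur, "cmd2", 4);
    printf("flush: %d\n", flush(&in_cur)); /* 0: both zeroed acks read OK */
    return 0;
}

The key invariant, relied on by the real vhost_vdpa_net_svq_flush() below as well, is that in_cursor.iov_base minus the start of s->status equals the number of commands still awaiting acks.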
@@ -619,39 +619,77 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
vhost_vdpa_net_client_stop(nc);
}
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
size_t in_len)
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
const struct iovec *out_sg, size_t out_num,
const struct iovec *in_sg, size_t in_num)
{
/* Buffers for the device */
const struct iovec out = {
.iov_base = s->cvq_cmd_out_buffer,
.iov_len = out_len,
};
const struct iovec in = {
.iov_base = s->status,
.iov_len = sizeof(virtio_net_ctrl_ack),
};
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
int r;
r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
if (unlikely(r != 0)) {
if (unlikely(r == -ENOSPC)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
__func__);
}
return r;
}
/*
* We can poll here since we've had BQL from the time we sent the
* descriptor. Also, we need to take the answer before SVQ pulls by itself,
* when BQL is released
*/
return vhost_svq_poll(svq, 1);
return r;
}
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
/*
* Convenience wrapper to poll SVQ for multiple control commands.
*
* Caller should hold the BQL when invoking this function, and should take
* the answer before SVQ pulls by itself when BQL is released.
*/
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
return vhost_svq_poll(svq, cmds_in_flight);
}
static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
/* reset the cursor of the output buffer for the device */
out_cursor->iov_base = s->cvq_cmd_out_buffer;
out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
/* reset the cursor of the in buffer for the device */
in_cursor->iov_base = s->status;
in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}
/*
* Poll SVQ for multiple pending control commands and check the device's ack.
*
* Caller should hold the BQL when invoking this function.
*
* @s: The VhostVDPAState
* @len: The length of the pending status shadow buffer
*/
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
/* device uses a one-byte length ack for each control command */
ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
if (unlikely(dev_written != len)) {
return -EIO;
}
/* check the device's ack */
for (int i = 0; i < len; ++i) {
if (s->status[i] != VIRTIO_NET_OK) {
return -EIO;
}
}
return 0;
}
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
struct iovec *out_cursor,
struct iovec *in_cursor, uint8_t class,
uint8_t cmd, const struct iovec *data_sg,
size_t data_num)
{
@@ -659,36 +697,72 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
.class = class,
.cmd = cmd,
};
size_t data_size = iov_size(data_sg, data_num);
size_t data_size = iov_size(data_sg, data_num), cmd_size;
struct iovec out, in;
ssize_t r;
unsigned dummy_cursor_iov_cnt;
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
cmd_size = sizeof(ctrl) + data_size;
if (vhost_svq_available_slots(svq) < 2 ||
iov_size(out_cursor, 1) < cmd_size) {
/*
* It is time to flush all pending control commands if SVQ is full
* or control commands shadow buffers are full.
*
* We can poll here since we've had BQL from the time
* we sent the descriptor.
*/
r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
(void *)s->status);
if (unlikely(r < 0)) {
return r;
}
vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
}
/* pack the CVQ command header */
memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
/* pack the CVQ command command-specific-data */
iov_to_buf(data_sg, data_num, 0,
s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
out_cursor->iov_base + sizeof(ctrl), data_size);
return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
sizeof(virtio_net_ctrl_ack));
/* extract the required buffer from the cursor for output */
iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
/* extract the required buffer from the cursor for input */
iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));
r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
if (unlikely(r < 0)) {
return r;
}
/* iterate the cursors */
dummy_cursor_iov_cnt = 1;
iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
dummy_cursor_iov_cnt = 1;
iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
return 0;
}
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
const struct iovec data = {
.iov_base = (void *)n->mac,
.iov_len = sizeof(n->mac),
};
ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_ADDR_SET,
&data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_ADDR_SET,
&data, 1);
if (unlikely(r < 0)) {
return r;
}
}
@@ -733,25 +807,24 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
.iov_len = mul_macs_size,
},
};
ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
data, ARRAY_SIZE(data));
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
data, ARRAY_SIZE(data));
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
const VirtIONet *n)
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
struct virtio_net_ctrl_mq mq;
ssize_t dev_written;
ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
return 0;
@@ -762,24 +835,24 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
.iov_base = &mq,
.iov_len = sizeof(mq),
};
dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
&data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
&data, 1);
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
const VirtIONet *n)
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
uint64_t offloads;
ssize_t dev_written;
ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
@@ -807,20 +880,20 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
.iov_base = &offloads,
.iov_len = sizeof(offloads),
};
dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
&data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
&data, 1);
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
struct iovec *out_cursor,
struct iovec *in_cursor,
uint8_t cmd,
uint8_t on)
{
@@ -828,14 +901,23 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
.iov_base = &on,
.iov_len = sizeof(on),
};
return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
cmd, &data, 1);
ssize_t r;
r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX, cmd, &data, 1);
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
const VirtIONet *n)
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
ssize_t dev_written;
ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
return 0;
@@ -860,13 +942,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (!n->mac_table.uni_overflow && !n->promisc) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_PROMISC, 0);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_PROMISC, 0);
if (unlikely(r < 0)) {
return r;
}
}
@@ -888,13 +967,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->mac_table.multi_overflow || n->allmulti) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
if (unlikely(r < 0)) {
return r;
}
}
@@ -913,13 +989,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->alluni) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_ALLUNI, 1);
if (dev_written < 0) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_ALLUNI, 1);
if (r < 0) {
return r;
}
}
@@ -934,13 +1007,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nomulti) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_NOMULTI, 1);
if (dev_written < 0) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_NOMULTI, 1);
if (r < 0) {
return r;
}
}
@@ -955,13 +1025,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nouni) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_NOUNI, 1);
if (dev_written < 0) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_NOUNI, 1);
if (r < 0) {
return r;
}
}
@@ -976,13 +1043,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nobcast) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_NOBCAST, 1);
if (dev_written < 0) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_NOBCAST, 1);
if (r < 0) {
return r;
}
}
@@ -991,27 +1055,29 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor,
uint16_t vid)
{
const struct iovec data = {
.iov_base = &vid,
.iov_len = sizeof(vid),
};
ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD,
&data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (unlikely(*s->status != VIRTIO_NET_OK)) {
return -EIO;
ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD,
&data, 1);
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
const VirtIONet *n)
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
int r;
@@ -1022,7 +1088,8 @@ static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
for (int i = 0; i < MAX_VLAN >> 5; i++) {
for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
if (n->vlans[i] & (1U << j)) {
r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
in_cursor, (i << 5) + j);
if (unlikely(r != 0)) {
return r;
}
@@ -1039,6 +1106,7 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
struct vhost_vdpa *v = &s->vhost_vdpa;
const VirtIONet *n;
int r;
struct iovec out_cursor, in_cursor;
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
@@ -1046,23 +1114,35 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
if (v->shadow_vqs_enabled) {
n = VIRTIO_NET(v->dev->vdev);
r = vhost_vdpa_net_load_mac(s, n);
vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
if (unlikely(r < 0)) {
return r;
}
r = vhost_vdpa_net_load_mq(s, n);
r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
r = vhost_vdpa_net_load_offloads(s, n);
r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
r = vhost_vdpa_net_load_rx(s, n);
r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
r = vhost_vdpa_net_load_vlan(s, n);
r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
/*
* We need to poll and check all pending device's used buffers.
*
* We can poll here since we've had BQL from the time
* we sent the descriptor.
*/
r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
if (unlikely(r)) {
return r;
}
@@ -1115,12 +1195,14 @@ static NetClientInfo net_vhost_vdpa_cvq_info = {
*/
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
VirtQueueElement *elem,
struct iovec *out)
struct iovec *out,
const struct iovec *in)
{
struct virtio_net_ctrl_mac mac_data, *mac_ptr;
struct virtio_net_ctrl_hdr *hdr_ptr;
uint32_t cursor;
ssize_t r;
uint8_t on = 1;
/* parse the non-multicast MAC address entries from CVQ command */
cursor = sizeof(*hdr_ptr);
@@ -1168,10 +1250,25 @@ static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
* filter table to the vdpa device, it should send the
* VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
*/
r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
hdr_ptr = out->iov_base;
out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
hdr_ptr->class = VIRTIO_NET_CTRL_RX;
hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
if (unlikely(r < 0)) {
return r;
}
/*
* We can poll here since we've had BQL from the time
* we sent the descriptor.
*/
r = vhost_vdpa_net_svq_poll(s, 1);
if (unlikely(r < sizeof(*s->status))) {
return r;
}
if (*s->status != VIRTIO_NET_OK) {
return sizeof(*s->status);
}
@@ -1249,10 +1346,15 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
.iov_base = s->cvq_cmd_out_buffer,
};
/* in buffer used for device model */
const struct iovec in = {
const struct iovec model_in = {
.iov_base = &status,
.iov_len = sizeof(status),
};
/* in buffer used for vdpa device */
const struct iovec vdpa_in = {
.iov_base = s->status,
.iov_len = sizeof(*s->status),
};
ssize_t dev_written = -EINVAL;
out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
@@ -1281,15 +1383,23 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
* the CVQ command directly.
*/
dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
&out);
&out, &vdpa_in);
if (unlikely(dev_written < 0)) {
goto out;
}
} else {
dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
if (unlikely(dev_written < 0)) {
ssize_t r;
r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
if (unlikely(r < 0)) {
dev_written = r;
goto out;
}
/*
* We can poll here since we've had BQL from the time
* we sent the descriptor.
*/
dev_written = vhost_vdpa_net_svq_poll(s, 1);
}
if (unlikely(dev_written < sizeof(status))) {
@@ -1302,7 +1412,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
}
status = VIRTIO_NET_ERR;
virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
if (status != VIRTIO_NET_OK) {
error_report("Bad CVQ processing in model");
}
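
One wire-format detail worth spelling out from the MAC-filter fallback hunk above: since vhost_vdpa_net_load_rx_mode() is now cursor-based, the fallback packs the VIRTIO_NET_CTRL_RX_PROMISC command into its caller-supplied buffer by hand. A self-contained sketch of that layout follows: a two-byte header, then command-specific data, with the device answering via a single ack byte. The constants follow the virtio-net spec; the buffer handling is illustrative, not QEMU's code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Per the virtio-net spec, a CVQ command is a two-byte header followed
 * by command-specific data; the device replies with one ack byte. */
struct virtio_net_ctrl_hdr {
    uint8_t class;
    uint8_t cmd;
};

#define VIRTIO_NET_CTRL_RX         0
#define VIRTIO_NET_CTRL_RX_PROMISC 0
#define VIRTIO_NET_OK              0

int main(void)
{
    uint8_t buf[sizeof(struct virtio_net_ctrl_hdr) + 1];
    struct virtio_net_ctrl_hdr hdr = {
        .class = VIRTIO_NET_CTRL_RX,
        .cmd = VIRTIO_NET_CTRL_RX_PROMISC,
    };
    uint8_t on = 1; /* "enable promiscuous mode", as in the fallback */

    memcpy(buf, &hdr, sizeof(hdr));             /* header first */
    memcpy(buf + sizeof(hdr), &on, sizeof(on)); /* then the payload */
    printf("CVQ command is %zu bytes on the wire\n", sizeof(buf));
    return 0;
}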