Merge tag 'net-pull-request' of https://github.com/jasowang/qemu into staging

# -----BEGIN PGP SIGNATURE-----
# Version: GnuPG v1
#
# iQEcBAABAgAGBQJjMqL9AAoJEO8Ells5jWIRUKcH/iNuJpxMXG18pGteBiTu3/ut
# KRR9u1nLZZXA2/02NTOYnrrHcplFQkEBXNHaEintWfctKHIP/llY8LDVriDFM+6N
# 4PzwLGLe7R9S7rfgt/xMDY0nFESFph5XyVTCxAAUm3Exhm8vIg1FM8Tep8lW/taW
# pliDa0K/9pQAfIN+eCnMUtH2JUttak8RwvAg5rXBg7XUx48ZTQn1o7aYYTPOAC2v
# RWkX0BKc7FVK5maAhe6Ugrcf6v4R2mDIAvnr+Anvo67SfgFZ5MtCllr0liJ4h3Vd
# +/PlsBDJotvht3QZVva1tn1Jk5rhN8N8lZbVOuMsklU/tX3Xrj99HJNETLXks2k=
# =82t6
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 27 Sep 2022 03:15:09 EDT
# gpg:                using RSA key EF04965B398D6211
# gpg: Good signature from "Jason Wang (Jason Wang on RedHat) <jasowang@redhat.com>" [full]
# Primary key fingerprint: 215D 46F4 8246 689E C77F  3562 EF04 965B 398D 6211

* tag 'net-pull-request' of https://github.com/jasowang/qemu:
  virtio: del net client if net_init_tap_one failed
  vdpa: Allow MQ feature in SVQ
  virtio-net: Update virtio-net curr_queue_pairs in vdpa backends
  vdpa: validate MQ CVQ commands
  vdpa: Add vhost_vdpa_net_load_mq
  vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load
  vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type
  e1000e: set RX desc status with DD flag in a separate operation

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

commit dbc4f48b5a
Stefan Hajnoczi, 2022-09-27 11:08:36 -04:00
4 changed files with 158 additions and 51 deletions

@@ -1364,6 +1364,57 @@ struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info,
     }
 }
 
+static inline void
+e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr,
+                             uint8_t *desc, dma_addr_t len)
+{
+    PCIDevice *dev = core->owner;
+
+    if (e1000e_rx_use_legacy_descriptor(core)) {
+        struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
+        size_t offset = offsetof(struct e1000_rx_desc, status);
+        uint8_t status = d->status;
+
+        d->status &= ~E1000_RXD_STAT_DD;
+        pci_dma_write(dev, addr, desc, len);
+
+        if (status & E1000_RXD_STAT_DD) {
+            d->status = status;
+            pci_dma_write(dev, addr + offset, &status, sizeof(status));
+        }
+    } else {
+        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
+            union e1000_rx_desc_packet_split *d =
+                (union e1000_rx_desc_packet_split *) desc;
+            size_t offset = offsetof(union e1000_rx_desc_packet_split,
+                wb.middle.status_error);
+            uint32_t status = d->wb.middle.status_error;
+
+            d->wb.middle.status_error &= ~E1000_RXD_STAT_DD;
+            pci_dma_write(dev, addr, desc, len);
+
+            if (status & E1000_RXD_STAT_DD) {
+                d->wb.middle.status_error = status;
+                pci_dma_write(dev, addr + offset, &status, sizeof(status));
+            }
+        } else {
+            union e1000_rx_desc_extended *d =
+                (union e1000_rx_desc_extended *) desc;
+            size_t offset = offsetof(union e1000_rx_desc_extended,
+                wb.upper.status_error);
+            uint32_t status = d->wb.upper.status_error;
+
+            d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
+            pci_dma_write(dev, addr, desc, len);
+
+            if (status & E1000_RXD_STAT_DD) {
+                d->wb.upper.status_error = status;
+                pci_dma_write(dev, addr + offset, &status, sizeof(status));
+            }
+        }
+    }
+}
+
 typedef struct e1000e_ba_state_st {
     uint16_t written[MAX_PS_BUFFERS];
     uint8_t cur_idx;
@@ -1600,7 +1651,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
         e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL,
                               rss_info, do_ps ? ps_hdr_len : 0, &bastate.written);
-        pci_dma_write(d, base, &desc, core->rx_desc_len);
+        e1000e_pci_dma_write_rx_desc(core, base, desc, core->rx_desc_len);
 
         e1000e_ring_advance(core, rxi,
                             core->rx_desc_len / E1000_MIN_RX_DESC_LEN);
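
The helper added above deliberately splits the descriptor writeback into two DMA operations: it clears E1000_RXD_STAT_DD in the local copy, writes the whole descriptor, and only afterwards writes the status field back with DD set. The sketch below is not part of the patch; it only illustrates the guest-side assumption this ordering protects, namely a driver that polls DD and treats it as "every other writeback field is already valid". The descriptor layout shown and the names rx_ring, head and handle_packet are hypothetical.

/*
 * Illustrative sketch only (not from the patch): a simplified guest RX
 * poll loop of the kind the split write above is meant to protect.
 * The struct mirrors a legacy-style RX descriptor; field names are
 * assumptions for illustration.
 */
#include <stdint.h>

#define E1000_RXD_STAT_DD 0x01          /* descriptor done, as in the diff */

struct guest_rx_desc {                  /* hypothetical legacy-style layout */
    uint64_t buffer_addr;
    uint16_t length;
    uint16_t csum;
    volatile uint8_t status;            /* DD is polled here */
    uint8_t errors;
    uint16_t special;
};

static void poll_rx_ring(struct guest_rx_desc *rx_ring, unsigned head,
                         void (*handle_packet)(struct guest_rx_desc *))
{
    struct guest_rx_desc *d = &rx_ring[head];

    while (!(d->status & E1000_RXD_STAT_DD)) {
        /* busy-wait: DD not set yet, device writeback still in progress */
    }

    /* Only safe because the device DMAs DD after length/csum/errors. */
    handle_packet(d);
}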

@@ -1412,19 +1412,14 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
         return VIRTIO_NET_ERR;
     }
 
-    /* Avoid changing the number of queue_pairs for vdpa device in
-     * userspace handler. A future fix is needed to handle the mq
-     * change in userspace handler with vhost-vdpa. Let's disable
-     * the mq handling from userspace for now and only allow get
-     * done through the kernel. Ripples may be seen when falling
-     * back to userspace, but without doing it qemu process would
-     * crash on a recursive entry to virtio_net_set_status().
-     */
-    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
-        return VIRTIO_NET_ERR;
-    }
-
     n->curr_queue_pairs = queue_pairs;
+    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
+        /*
+         * Avoid updating the backend for a vdpa device: We're only interested
+         * in updating the device model queues.
+         */
+        return VIRTIO_NET_OK;
+    }
     /* stop the backend before changing the number of queue_pairs to avoid handling a
      * disabled queue */
     virtio_net_set_status(vdev, vdev->status);
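
For context, the MQ change that virtio_net_handle_mq() now applies to the device model arrives on the control virtqueue as a class/command header followed by a virtio_net_ctrl_mq payload. The sketch below is not part of the patch; it shows roughly how a guest driver would build that request. The constant and struct names follow the virtio specification (linux/virtio_net.h), build_mq_request() is a hypothetical helper, and wire-endianness conversion (the fields are little-endian on the wire) is omitted.

/*
 * Illustrative sketch only: building the control-queue request that ends
 * up as (cmd, queue_pairs) in virtio_net_handle_mq() above.
 */
#include <stdint.h>
#include <string.h>

#define VIRTIO_NET_CTRL_MQ               4
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET  0

struct virtio_net_ctrl_hdr {
    uint8_t class;                      /* VIRTIO_NET_CTRL_MQ */
    uint8_t cmd;                        /* VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET */
};

struct virtio_net_ctrl_mq {
    uint16_t virtqueue_pairs;           /* becomes queue_pairs in the handler */
};

/* Fill buf with header + payload the way a guest driver would queue them. */
static size_t build_mq_request(uint8_t *buf, uint16_t queue_pairs)
{
    struct virtio_net_ctrl_hdr hdr = {
        .class = VIRTIO_NET_CTRL_MQ,
        .cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
    };
    struct virtio_net_ctrl_mq mq = { .virtqueue_pairs = queue_pairs };

    memcpy(buf, &hdr, sizeof(hdr));
    memcpy(buf + sizeof(hdr), &mq, sizeof(mq));
    return sizeof(hdr) + sizeof(mq);
}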