
Creates and supports a GPA->IOVA tree and a partial IOVA->HVA tree by
splitting up guest-backed memory maps and host-only memory maps from the
full IOVA->HVA tree. That is, any guest-backed memory maps are now stored
in the GPA->IOVA tree and host-only memory maps stay in the IOVA->HVA tree.

Also propagates the GPAs (in_addr/out_addr) of a VirtQueueElement to
vhost_svq_translate_addr() to translate GPAs to IOVAs via the GPA->IOVA
tree (when descriptors are backed by guest memory). For descriptors backed
by host-only memory, the existing partial SVQ IOVA->HVA tree is used.

GPAs are unique in the guest's address space, ensuring unambiguous IOVA
translations. This avoids the issue where different GPAs map to the same
HVA, causing the original HVA->IOVA translation to potentially return an
IOVA associated with the wrong intended GPA.

Signed-off-by: Jonah Palmer <jonah.palmer@oracle.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20250217144936.3589907-3-jonah.palmer@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
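To illustrate the lookup split the commit describes, below is a minimal,
self-contained sketch (not QEMU code): linear search stands in for the
interval trees, MapEntry stands in for QEMU's DMAMap, and a GPA_INVALID
sentinel marks host-only buffers. The real logic lives in
vhost_svq_translate_addr() in hw/virtio/vhost-shadow-virtqueue.c; names
and layout here are illustrative assumptions.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    typedef uint64_t hwaddr;

    /* Toy stand-in for a DMAMap entry: a contiguous range in one source
     * address space (GPA or HVA) mapped onto an IOVA range. */
    typedef struct MapEntry {
        hwaddr key_start;   /* GPA (gpa_tree) or HVA (host_tree) */
        hwaddr size;
        hwaddr iova_start;
    } MapEntry;

    static const MapEntry *find_map(const MapEntry *tree, size_t n, hwaddr key)
    {
        for (size_t i = 0; i < n; i++) {
            if (key >= tree[i].key_start &&
                key < tree[i].key_start + tree[i].size) {
                return &tree[i];
            }
        }
        return NULL;
    }

    #define GPA_INVALID ((hwaddr)-1)

    /*
     * Translate one descriptor address to an IOVA.  If the element carries
     * a GPA (guest-backed memory), look it up in the GPA->IOVA tree; GPAs
     * are unique in the guest address space, so the result is unambiguous
     * even when several GPAs share one backing HVA.  Only host-only
     * buffers fall back to the HVA-keyed tree.
     */
    static bool translate_addr(hwaddr gpa, hwaddr hva, hwaddr *iova,
                               const MapEntry *gpa_tree, size_t n_gpa,
                               const MapEntry *host_tree, size_t n_host)
    {
        const MapEntry *m;

        if (gpa != GPA_INVALID) {
            m = find_map(gpa_tree, n_gpa, gpa);
            if (!m) {
                return false;
            }
            *iova = m->iova_start + (gpa - m->key_start);
        } else {
            m = find_map(host_tree, n_host, hva);
            if (!m) {
                return false;
            }
            *iova = m->iova_start + (hva - m->key_start);
        }
        return true;
    }

    int main(void)
    {
        const MapEntry gpa_tree[] = {
            { 0x100000, 0x10000, 0x1000 },   /* GPA 0x100000 -> IOVA 0x1000 */
            { 0x200000, 0x10000, 0x20000 },  /* GPA 0x200000 -> IOVA 0x20000 */
        };
        const MapEntry host_tree[] = {
            { 0x7f0000000000, 0x1000, 0x40000 }, /* host-only buffer */
        };

        hwaddr iova;
        if (translate_addr(0x200008, 0, &iova, gpa_tree, 2, host_tree, 1)) {
            printf("guest-backed: IOVA 0x%" PRIx64 "\n", iova); /* 0x20008 */
        }
        if (translate_addr(GPA_INVALID, 0x7f0000000100, &iova,
                           gpa_tree, 2, host_tree, 1)) {
            printf("host-only: IOVA 0x%" PRIx64 "\n", iova);    /* 0x40100 */
        }
        return 0;
    }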
143 lines
4.3 KiB
C
/*
 * vhost shadow virtqueue
 *
 * SPDX-FileCopyrightText: Red Hat, Inc. 2021
 * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef VHOST_SHADOW_VIRTQUEUE_H
#define VHOST_SHADOW_VIRTQUEUE_H

#include "qemu/event_notifier.h"
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/vhost-iova-tree.h"

typedef struct SVQDescState {
    VirtQueueElement *elem;

    /*
     * Number of descriptors exposed to the device. May or may not match
     * guest's
     */
    unsigned int ndescs;
} SVQDescState;

typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;

/**
 * Callback to handle an avail buffer.
 *
 * @svq: Shadow virtqueue
 * @elem: Element placed in the queue by the guest
 * @vq_callback_opaque: Opaque
 *
 * Returns 0 if the vq is running as expected.
 *
 * Note that ownership of elem is transferred to the callback.
 */
typedef int (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq,
                                      VirtQueueElement *elem,
                                      void *vq_callback_opaque);

typedef struct VhostShadowVirtqueueOps {
    VirtQueueAvailCallback avail_handler;
} VhostShadowVirtqueueOps;

/* Shadow virtqueue to relay notifications */
typedef struct VhostShadowVirtqueue {
    /* Shadow vring */
    struct vring vring;

    /* Shadow kick notifier, sent to vhost */
    EventNotifier hdev_kick;
    /* Shadow call notifier, sent to vhost */
    EventNotifier hdev_call;

    /*
     * Borrowed virtqueue's guest to host notifier. To borrow it in this event
     * notifier allows to recover the VhostShadowVirtqueue from the event loop
     * easily. If we use the VirtQueue's one, we don't have an easy way to
     * retrieve VhostShadowVirtqueue.
     *
     * So shadow virtqueue must not clean it, or we would lose VirtQueue one.
     */
    EventNotifier svq_kick;

    /* Guest's call notifier, where the SVQ calls guest. */
    EventNotifier svq_call;

    /* Virtio queue shadowing */
    VirtQueue *vq;

    /* Virtio device */
    VirtIODevice *vdev;

    /* IOVA mapping */
    VhostIOVATree *iova_tree;

    /* SVQ vring descriptors state */
    SVQDescState *desc_state;

    /* Next VirtQueue element that guest made available */
    VirtQueueElement *next_guest_avail_elem;

    /*
     * Backup next field for each descriptor so we can recover securely, not
     * needing to trust the device access.
     */
    uint16_t *desc_next;

    /* Caller callbacks */
    const VhostShadowVirtqueueOps *ops;

    /* Caller callbacks opaque */
    void *ops_opaque;

    /* Next head to expose to the device */
    uint16_t shadow_avail_idx;

    /* Next free descriptor */
    uint16_t free_head;

    /* Last seen used idx */
    uint16_t shadow_used_idx;

    /* Next head to consume from the device */
    uint16_t last_used_idx;

    /* Size of SVQ vring free descriptors */
    uint16_t num_free;
} VhostShadowVirtqueue;

bool vhost_svq_valid_features(uint64_t features, Error **errp);

uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq);
void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
                         const VirtQueueElement *elem, uint32_t len);
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
                  size_t out_num, const hwaddr *out_addr,
                  const struct iovec *in_sg, size_t in_num,
                  const hwaddr *in_addr, VirtQueueElement *elem);
size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num);

void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
                              struct vhost_vring_addr *addr);
size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);

void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                     VirtQueue *vq, VhostIOVATree *iova_tree);
void vhost_svq_stop(VhostShadowVirtqueue *svq);

VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
                                    void *ops_opaque);

void vhost_svq_free(gpointer vq);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free);

#endif
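Given the vhost_svq_add() prototype above, a caller forwarding a
guest-made element threads the element's per-descriptor GPA arrays
(in_addr/out_addr on VirtQueueElement, added by this series) straight
through, so translation can prefer the GPA->IOVA tree. A sketch modeled
on vhost_svq_add_element() in hw/virtio/vhost-shadow-virtqueue.c; the
function name here is hypothetical and the exact body in QEMU may differ:

    /* Sketch: forward a guest element to the device through the SVQ,
     * passing the per-descriptor GPAs so guest-backed buffers are
     * translated via the GPA->IOVA tree (host-only buffers, which have
     * no GPA, still go through the partial IOVA->HVA tree). */
    static int svq_forward_element(VhostShadowVirtqueue *svq,
                                   VirtQueueElement *elem)
    {
        return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->out_addr,
                             elem->in_sg, elem->in_num, elem->in_addr, elem);
    }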