mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-18 23:52:14 -06:00
vdpa: move iotlb_batch_begin_sent to vhost_vdpa_shared
Next patches will register the vhost_vdpa memory listener while the VM is
migrating at the destination, so we can map the memory to the device before
stopping the VM at the source.  The main goal is to reduce the downtime.

However, the destination QEMU is unaware of which vhost_vdpa device will
register its memory_listener.  If the source guest has CVQ enabled, it will
be the CVQ device.  Otherwise, it will be the first one.

Move the iotlb_batch_begin_sent member to VhostVDPAShared so all vhost_vdpa
can use it, rather than always in the first / last vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20231221174322.3130442-8-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
f12b2498e5
commit
7627f0a2de
2 changed files with 6 additions and 5 deletions
|
@@ -162,11 +162,11 @@ static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
 static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
 {
     if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
-        !v->iotlb_batch_begin_sent) {
+        !v->shared->iotlb_batch_begin_sent) {
         vhost_vdpa_listener_begin_batch(v);
     }
 
-    v->iotlb_batch_begin_sent = true;
+    v->shared->iotlb_batch_begin_sent = true;
 }
 
 static void vhost_vdpa_listener_commit(MemoryListener *listener)
@@ -180,7 +180,7 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
         return;
     }
 
-    if (!v->iotlb_batch_begin_sent) {
+    if (!v->shared->iotlb_batch_begin_sent) {
         return;
     }
 
@@ -193,7 +193,7 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
                      fd, errno, strerror(errno));
     }
 
-    v->iotlb_batch_begin_sent = false;
+    v->shared->iotlb_batch_begin_sent = false;
 }
 
 static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
@@ -38,6 +38,8 @@ typedef struct vhost_vdpa_shared {
     /* IOVA mapping used by the Shadow Virtqueue */
     VhostIOVATree *iova_tree;
 
+    bool iotlb_batch_begin_sent;
+
     /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
     bool shadow_data;
 } VhostVDPAShared;
@@ -45,7 +47,6 @@ typedef struct vhost_vdpa_shared {
 typedef struct vhost_vdpa {
     int index;
     uint32_t msg_type;
-    bool iotlb_batch_begin_sent;
     uint32_t address_space_id;
     MemoryListener listener;
     uint64_t acked_features;
|
|
Loading…
Add table
Add a link
Reference in a new issue