virtio: move allocation to virtqueue_pop/vring_pop

The return code of virtqueue_pop/vring_pop is unused except to check for
errors or 0.  We can thus easily move allocation inside the functions
and just return a pointer to the VirtQueueElement.

The advantage is that we will be able to allocate only the space that
is needed for the actual size of the s/g list instead of the full
VIRTQUEUE_MAX_SIZE items.  Currently VirtQueueElement takes about 48K
of memory, and this kind of allocation puts a lot of stress on malloc.
By cutting the size by two or three orders of magnitude, malloc can
use much more efficient algorithms.
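
For illustration, the calling convention that devices switch to looks roughly
like this (a sketch only: vq and len are placeholder names, and per-device
error handling differs):

    VirtQueueElement *elem;

    /* virtqueue_pop now allocates the element itself; sz is expected to
     * be at least sizeof(VirtQueueElement), and devices may pass a
     * larger, request-specific size. */
    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
    if (!elem) {
        return;                 /* queue is empty */
    }

    /* ... process elem->out_sg / elem->in_sg ... */

    virtqueue_push(vq, elem, len);
    g_free(elem);               /* the caller now owns and frees the element */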

The patch is pretty large, but changes to each device are testable
more or less independently.  Splitting it would mostly add churn.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Paolo Bonzini, 2016-02-04 16:26:51 +02:00, committed by Michael S. Tsirkin
commit 51b19ebe43, parent 6aa46d8ff1
22 changed files with 209 additions and 142 deletions

@@ -44,7 +44,7 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n);
 void vring_disable_notification(VirtIODevice *vdev, Vring *vring);
 bool vring_enable_notification(VirtIODevice *vdev, Vring *vring);
 bool vring_should_notify(VirtIODevice *vdev, Vring *vring);
-int vring_pop(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem);
+void *vring_pop(VirtIODevice *vdev, Vring *vring, size_t sz);
 void vring_push(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
                 int len);

@@ -37,7 +37,7 @@ typedef struct VirtIOBalloon {
     uint32_t num_pages;
     uint32_t actual;
     uint64_t stats[VIRTIO_BALLOON_S_NR];
-    VirtQueueElement stats_vq_elem;
+    VirtQueueElement *stats_vq_elem;
     size_t stats_vq_offset;
     QEMUTimer *stats_timer;
     int64_t stats_last_update;

@@ -80,8 +80,7 @@ typedef struct MultiReqBuffer {
     bool is_write;
 } MultiReqBuffer;
-VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s);
+void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req);
 void virtio_blk_free_request(VirtIOBlockReq *req);
 void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb);

@@ -47,7 +47,7 @@ typedef struct VirtIONetQueue {
     QEMUBH *tx_bh;
     int tx_waiting;
     struct {
-        VirtQueueElement elem;
+        VirtQueueElement *elem;
     } async_tx;
     struct VirtIONet *n;
 } VirtIONetQueue;

@@ -160,7 +160,7 @@ void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp);
 void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req);
 bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req);
 void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req);
-VirtIOSCSIReq *virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq);
+void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req);
 void virtio_scsi_free_req(VirtIOSCSIReq *req);
 void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                             uint32_t event, uint32_t reason);
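
As an illustration of how the reworked virtio_scsi_init_req pairs with the new
virtqueue_pop, a hypothetical wrapper is sketched below; the real code may size
the allocation differently, and the sketch assumes VirtQueueElement is the
first field of VirtIOSCSIReq:

    static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
    {
        /* Allocate the request, embedded VirtQueueElement included, in
         * one go; any trailing variable-sized data would be added to
         * the size passed to virtqueue_pop. */
        VirtIOSCSIReq *req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq));

        if (!req) {
            return NULL;
        }
        virtio_scsi_init_req(s, vq, req);
        return req;
    }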

@@ -122,7 +122,7 @@ struct VirtIOSerialPort {
      * element popped and continue consuming it once the backend
      * becomes writable again.
      */
-    VirtQueueElement elem;
+    VirtQueueElement *elem;
     /*
      * The index and the offset into the iov buffer that was popped in

@@ -152,7 +152,7 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len, unsigned int idx);
 void virtqueue_map(VirtQueueElement *elem);
-int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
+void *virtqueue_pop(VirtQueue *vq, size_t sz);
 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                           unsigned int out_bytes);
 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,