Pull request

v2:
 * Fix virtio 16lx -> HWADDR_PRIx format specifier [Peter]

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJWG4kaAAoJEJykq7OBq3PI+EoIALrMej8cpS8a1ByGcnxoXi0k
SJlhpUC6M9F1YyY6Vx/juRcTibBuE4RNrWirdVXOkINwyHrOsGRd8p8yLWCdUO9G
3Yznu9fpHcoPRojcKh5m6QhZzm1HUqjO6GoaZMOCpGdrwhUo8ThwBnXR0ffCF98v
Is0kTkoUVSK5IVHBamzb/myVKSWb6uOOFO6Pc7Kvivlui7cNMUGom2pS6FJD4eEG
K0+EUoKBGYYwKhZS6IBUDPGFN9takrqkzZODiheDrVegJTN64gUvzMJOgU5P/F7H
jhRFio8LqxQC3uhbYDWyMS4PVWbm7RjfYu/ph9K3+/s3egwLs+VsNkv4lsf8x+Q=
=i0do
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

Pull request

v2:
 * Fix virtio 16lx -> HWADDR_PRIx format specifier [Peter]

# gpg: Signature made Mon 12 Oct 2015 11:19:06 BST using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"

* remotes/stefanha/tags/block-pull-request:
  sdhci.c: Limit the maximum block size
  block: switch from g_slice allocator to malloc
  virtio dataplane: adapt dataplane for virtio Version 1
  virtio-blk: use blk_io_plug/unplug for Linux AIO batching
  sdhci: Pass drive parameter to sdhci-pci via qdev property

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 5451316ed0
10 changed files with 106 additions and 39 deletions
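Note on the g_slice conversion: the "block: switch from g_slice allocator to malloc" change below replaces GSlice calls one-for-one with GLib's plain allocation API. A minimal standalone sketch of the two idioms (hypothetical Foo type; not code from this series):

/* Sketch of the two GLib allocation idioms, assuming a hypothetical Foo. */
#include <glib.h>

typedef struct {
    int x;
} Foo;

/* Old idiom: GSlice needs the size (or type) at both alloc and free time. */
static Foo *foo_new_slice(void)
{
    Foo *f = g_slice_new(Foo);      /* equivalent to g_slice_alloc(sizeof(Foo)) */
    f->x = 0;
    return f;
}

static void foo_free_slice(Foo *f)
{
    g_slice_free(Foo, f);           /* size must match the allocation */
}

/* New idiom: plain malloc-backed allocation; g_free() needs no size. */
static Foo *foo_new_malloc(void)
{
    Foo *f = g_new(Foo, 1);         /* equivalent to g_malloc(sizeof(Foo)) */
    f->x = 0;
    return f;
}

static void foo_free_malloc(Foo *f)
{
    g_free(f);
}

int main(void)
{
    foo_free_slice(foo_new_slice());
    foo_free_malloc(foo_new_malloc());
    return 0;
}

Because g_free() does not need the allocation size, callers no longer have to keep the freed size in sync with the allocation, which is exactly the simplification the hunks below apply.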
@@ -2218,7 +2218,7 @@ void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
 {
     BlockAIOCB *acb;
 
-    acb = g_slice_alloc(aiocb_info->aiocb_size);
+    acb = g_malloc(aiocb_info->aiocb_size);
     acb->aiocb_info = aiocb_info;
     acb->bs = bs;
     acb->cb = cb;
@@ -2238,7 +2238,7 @@ void qemu_aio_unref(void *p)
     BlockAIOCB *acb = p;
     assert(acb->refcnt > 0);
     if (--acb->refcnt == 0) {
-        g_slice_free1(acb->aiocb_info->aiocb_size, acb);
+        g_free(acb);
     }
 }
 

@@ -113,7 +113,7 @@ static void mirror_iteration_done(MirrorOp *op, int ret)
     }
 
     qemu_iovec_destroy(&op->qiov);
-    g_slice_free(MirrorOp, op);
+    g_free(op);
 
     if (s->waiting_for_io) {
         qemu_coroutine_enter(s->common.co, NULL);
@@ -264,7 +264,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
     } while (delay_ns == 0 && next_sector < end);
 
     /* Allocate a MirrorOp that is used as an AIO callback. */
-    op = g_slice_new(MirrorOp);
+    op = g_new(MirrorOp, 1);
     op->s = s;
     op->sector_num = sector_num;
     op->nb_sectors = nb_sectors;

@@ -1259,7 +1259,7 @@ static int aio_worker(void *arg)
         break;
     }
 
-    g_slice_free(RawPosixAIOData, aiocb);
+    g_free(aiocb);
     return ret;
 }
 
@@ -1267,7 +1267,7 @@ static int paio_submit_co(BlockDriverState *bs, int fd,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         int type)
 {
-    RawPosixAIOData *acb = g_slice_new(RawPosixAIOData);
+    RawPosixAIOData *acb = g_new(RawPosixAIOData, 1);
     ThreadPool *pool;
 
     acb->bs = bs;
@@ -1292,7 +1292,7 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, int fd,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockCompletionFunc *cb, void *opaque, int type)
 {
-    RawPosixAIOData *acb = g_slice_new(RawPosixAIOData);
+    RawPosixAIOData *acb = g_new(RawPosixAIOData, 1);
     ThreadPool *pool;
 
     acb->bs = bs;
@@ -2237,7 +2237,7 @@ static BlockAIOCB *hdev_aio_ioctl(BlockDriverState *bs,
     if (fd_open(bs) < 0)
         return NULL;
 
-    acb = g_slice_new(RawPosixAIOData);
+    acb = g_new(RawPosixAIOData, 1);
     acb->bs = bs;
     acb->aio_type = QEMU_AIO_IOCTL;
     acb->aio_fildes = s->fd;

@@ -135,7 +135,7 @@ static int aio_worker(void *arg)
         break;
     }
 
-    g_slice_free(RawWin32AIOData, aiocb);
+    g_free(aiocb);
     return ret;
 }
 
@@ -143,7 +143,7 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockCompletionFunc *cb, void *opaque, int type)
 {
-    RawWin32AIOData *acb = g_slice_new(RawWin32AIOData);
+    RawWin32AIOData *acb = g_new(RawWin32AIOData, 1);
     ThreadPool *pool;
 
     acb->bs = bs;

@@ -30,7 +30,7 @@
 
 VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
 {
-    VirtIOBlockReq *req = g_slice_new(VirtIOBlockReq);
+    VirtIOBlockReq *req = g_new(VirtIOBlockReq, 1);
     req->dev = s;
     req->qiov.size = 0;
     req->in_len = 0;
@@ -42,7 +42,7 @@ VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
 void virtio_blk_free_request(VirtIOBlockReq *req)
 {
     if (req) {
-        g_slice_free(VirtIOBlockReq, req);
+        g_free(req);
     }
 }
 
@@ -600,6 +600,8 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
         return;
     }
 
+    blk_io_plug(s->blk);
+
     while ((req = virtio_blk_get_request(s))) {
         virtio_blk_handle_request(req, &mrb);
     }
@@ -607,6 +609,8 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s->blk, &mrb);
    }
+
+    blk_io_unplug(s->blk);
 }
 
 static void virtio_blk_dma_restart_bh(void *opaque)

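The two hunks above bracket the virtio-blk request loop with blk_io_plug()/blk_io_unplug() so that submissions issued inside the loop can be batched by the Linux AIO backend. A self-contained toy sketch of the plug/unplug idea (illustrative only; not QEMU's implementation):

/* Toy model of plug/unplug batching: while "plugged", submissions are queued
 * and flushed in one batch on unplug (illustrative code, not QEMU's). */
#include <stdio.h>

#define MAX_BATCH 16

typedef struct {
    int plugged;
    int queued[MAX_BATCH];
    int nqueued;
} Backend;

static void io_plug(Backend *b)
{
    b->plugged = 1;
}

static void io_submit_one(Backend *b, int req)
{
    if (b->plugged && b->nqueued < MAX_BATCH) {
        b->queued[b->nqueued++] = req;          /* hold back while plugged */
    } else {
        printf("submit request %d alone\n", req);
    }
}

static void io_unplug(Backend *b)
{
    b->plugged = 0;
    if (b->nqueued) {
        printf("submit batch of %d requests\n", b->nqueued);  /* one flush */
        b->nqueued = 0;
    }
}

int main(void)
{
    Backend b = {0};
    io_plug(&b);
    for (int i = 0; i < 4; i++) {
        io_submit_one(&b, i);
    }
    io_unplug(&b);
    return 0;
}
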
@@ -492,7 +492,9 @@ SDState *sd_init(BlockBackend *blk, bool is_spi)
     sd->blk = blk;
     sd_reset(sd);
     if (sd->blk) {
-        blk_attach_dev_nofail(sd->blk, sd);
+        /* Attach dev if not already attached. (This call ignores an
+         * error return code if sd->blk is already attached.) */
+        blk_attach_dev(sd->blk, sd);
         blk_set_dev_ops(sd->blk, &sd_block_ops, sd);
     }
     vmstate_register(NULL, -1, &sd_vmstate, sd);

@@ -1009,6 +1009,16 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
             MASKED_WRITE(s->blksize, mask, value);
             MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);
         }
+
+        /* Limit block size to the maximum buffer size */
+        if (extract32(s->blksize, 0, 12) > s->buf_maxsz) {
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than " \
+                          "the maximum buffer 0x%x", __func__, s->blksize,
+                          s->buf_maxsz);
+
+            s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
+        }
+
         break;
     case SDHC_ARGUMENT:
         MASKED_WRITE(s->argument, mask, value);

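The new clamp uses QEMU's extract32()/deposit32() bit-field helpers to read and rewrite only the 12-bit block-size field of the register. A standalone sketch with local equivalents of those helpers (illustrative values; not QEMU code):

/* Standalone sketch of the block-size clamp, using local equivalents of
 * QEMU's extract32()/deposit32(); values are assumed, not from the device. */
#include <stdint.h>
#include <stdio.h>

static uint32_t extract32_eq(uint32_t value, int start, int length)
{
    return (value >> start) & (~0U >> (32 - length));
}

static uint32_t deposit32_eq(uint32_t value, int start, int length, uint32_t field)
{
    uint32_t mask = (~0U >> (32 - length)) << start;
    return (value & ~mask) | ((field << start) & mask);
}

int main(void)
{
    uint32_t blksize = 0x7abc;   /* bits 0..11 hold the transfer block size */
    uint32_t buf_maxsz = 0x800;  /* assumed 2 KiB internal buffer */

    if (extract32_eq(blksize, 0, 12) > buf_maxsz) {
        blksize = deposit32_eq(blksize, 0, 12, buf_maxsz);  /* clamp the field only */
    }
    printf("blksize = 0x%x\n", blksize);  /* prints 0x7800: upper bits preserved */
    return 0;
}

Reading the field with extract32() and writing it back with deposit32() leaves the neighbouring bits of the register untouched, which is why the guest-visible upper bits survive the clamp.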
@@ -1145,13 +1155,9 @@ static inline unsigned int sdhci_get_fifolen(SDHCIState *s)
     }
 }
 
-static void sdhci_initfn(SDHCIState *s)
+static void sdhci_initfn(SDHCIState *s, BlockBackend *blk)
 {
-    DriveInfo *di;
-
-    /* FIXME use a qdev drive property instead of drive_get_next() */
-    di = drive_get_next(IF_SD);
-    s->card = sd_init(di ? blk_by_legacy_dinfo(di) : NULL, false);
+    s->card = sd_init(blk, false);
     if (s->card == NULL) {
         exit(1);
     }
@@ -1215,7 +1221,8 @@ const VMStateDescription sdhci_vmstate = {
 
 /* Capabilities registers provide information on supported features of this
  * specific host controller implementation */
-static Property sdhci_properties[] = {
+static Property sdhci_pci_properties[] = {
+    DEFINE_BLOCK_PROPERTIES(SDHCIState, conf),
     DEFINE_PROP_UINT32("capareg", SDHCIState, capareg,
             SDHC_CAPAB_REG_DEFAULT),
     DEFINE_PROP_UINT32("maxcurr", SDHCIState, maxcurr, 0),
@@ -1227,7 +1234,7 @@ static void sdhci_pci_realize(PCIDevice *dev, Error **errp)
     SDHCIState *s = PCI_SDHCI(dev);
     dev->config[PCI_CLASS_PROG] = 0x01; /* Standard Host supported DMA */
     dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */
-    sdhci_initfn(s);
+    sdhci_initfn(s, s->conf.blk);
     s->buf_maxsz = sdhci_get_fifolen(s);
     s->fifo_buffer = g_malloc0(s->buf_maxsz);
     s->irq = pci_allocate_irq(dev);
@@ -1254,9 +1261,7 @@ static void sdhci_pci_class_init(ObjectClass *klass, void *data)
     k->class_id = PCI_CLASS_SYSTEM_SDHCI;
     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
     dc->vmsd = &sdhci_vmstate;
-    dc->props = sdhci_properties;
-    /* Reason: realize() method uses drive_get_next() */
-    dc->cannot_instantiate_with_device_add_yet = true;
+    dc->props = sdhci_pci_properties;
 }
 
 static const TypeInfo sdhci_pci_info = {
@@ -1266,10 +1271,21 @@ static const TypeInfo sdhci_pci_info = {
     .class_init = sdhci_pci_class_init,
 };
 
+static Property sdhci_sysbus_properties[] = {
+    DEFINE_PROP_UINT32("capareg", SDHCIState, capareg,
+            SDHC_CAPAB_REG_DEFAULT),
+    DEFINE_PROP_UINT32("maxcurr", SDHCIState, maxcurr, 0),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void sdhci_sysbus_init(Object *obj)
 {
     SDHCIState *s = SYSBUS_SDHCI(obj);
-    sdhci_initfn(s);
+    DriveInfo *di;
+
+    /* FIXME use a qdev drive property instead of drive_get_next() */
+    di = drive_get_next(IF_SD);
+    sdhci_initfn(s, di ? blk_by_legacy_dinfo(di) : NULL);
 }
 
 static void sdhci_sysbus_finalize(Object *obj)
@@ -1296,7 +1312,7 @@ static void sdhci_sysbus_class_init(ObjectClass *klass, void *data)
     DeviceClass *dc = DEVICE_CLASS(klass);
 
     dc->vmsd = &sdhci_vmstate;
-    dc->props = sdhci_properties;
+    dc->props = sdhci_sysbus_properties;
     dc->realize = sdhci_sysbus_realize;
     /* Reason: instance_init() method uses drive_get_next() */
     dc->cannot_instantiate_with_device_add_yet = true;

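With DEFINE_BLOCK_PROPERTIES() added to sdhci_pci_properties and the cannot_instantiate_with_device_add_yet restriction dropped, the backing image for sdhci-pci should be attachable through a qdev drive property instead of drive_get_next(). A hypothetical invocation (option names follow standard -drive/-device syntax; the exact supported set depends on the machine and QEMU version):

# Hypothetical example of wiring a backing image to sdhci-pci via the new
# qdev "drive" property (assumed usage, not taken from this commit).
qemu-system-x86_64 \
    -drive id=sdcard0,if=none,format=raw,file=sd.img \
    -device sdhci-pci,drive=sdcard0
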
@@ -26,6 +26,7 @@
 #define SDHCI_H
 
 #include "qemu-common.h"
+#include "hw/block/block.h"
 #include "hw/pci/pci.h"
 #include "hw/sysbus.h"
 #include "hw/sd.h"
@@ -239,6 +240,7 @@ typedef struct SDHCIState {
     };
     SDState *card;
     MemoryRegion iomem;
+    BlockConf conf;
 
     QEMUTimer *insert_timer; /* timer for 'changing' sd card. */
     QEMUTimer *transfer_timer;

@@ -67,22 +67,53 @@ static void vring_unmap(void *buffer, bool is_write)
 /* Map the guest's vring to host memory */
 bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
 {
-    hwaddr vring_addr = virtio_queue_get_ring_addr(vdev, n);
-    hwaddr vring_size = virtio_queue_get_ring_size(vdev, n);
-    void *vring_ptr;
+    struct vring *vr = &vring->vr;
+    hwaddr addr;
+    hwaddr size;
+    void *ptr;
 
     vring->broken = false;
+    vr->num = virtio_queue_get_num(vdev, n);
 
-    vring_ptr = vring_map(&vring->mr, vring_addr, vring_size, true);
-    if (!vring_ptr) {
-        error_report("Failed to map vring "
-                     "addr %#" HWADDR_PRIx " size %" HWADDR_PRIu,
-                     vring_addr, vring_size);
-        vring->broken = true;
-        return false;
+    addr = virtio_queue_get_desc_addr(vdev, n);
+    size = virtio_queue_get_desc_size(vdev, n);
+    /* Map the descriptor area as read only */
+    ptr = vring_map(&vring->mr_desc, addr, size, false);
+    if (!ptr) {
+        error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring desc "
+                     "at 0x%" HWADDR_PRIx,
+                     size, addr);
+        goto out_err_desc;
     }
+    vr->desc = ptr;
 
-    vring_init(&vring->vr, virtio_queue_get_num(vdev, n), vring_ptr, 4096);
+    addr = virtio_queue_get_avail_addr(vdev, n);
+    size = virtio_queue_get_avail_size(vdev, n);
+    /* Add the size of the used_event_idx */
+    size += sizeof(uint16_t);
+    /* Map the driver area as read only */
+    ptr = vring_map(&vring->mr_avail, addr, size, false);
+    if (!ptr) {
+        error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring avail "
+                     "at 0x%" HWADDR_PRIx,
+                     size, addr);
+        goto out_err_avail;
+    }
+    vr->avail = ptr;
+
+    addr = virtio_queue_get_used_addr(vdev, n);
+    size = virtio_queue_get_used_size(vdev, n);
+    /* Add the size of the avail_event_idx */
+    size += sizeof(uint16_t);
+    /* Map the device area as read-write */
+    ptr = vring_map(&vring->mr_used, addr, size, true);
+    if (!ptr) {
+        error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring used "
+                     "at 0x%" HWADDR_PRIx,
+                     size, addr);
+        goto out_err_used;
+    }
+    vr->used = ptr;
 
     vring->last_avail_idx = virtio_queue_get_last_avail_idx(vdev, n);
     vring->last_used_idx = vring_get_used_idx(vdev, vring);
@@ -92,6 +123,14 @@ bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
     trace_vring_setup(virtio_queue_get_ring_addr(vdev, n),
                       vring->vr.desc, vring->vr.avail, vring->vr.used);
     return true;
+
+out_err_used:
+    memory_region_unref(vring->mr_avail);
+out_err_avail:
+    memory_region_unref(vring->mr_desc);
+out_err_desc:
+    vring->broken = true;
+    return false;
 }
 
 void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
@@ -99,7 +138,9 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
     virtio_queue_set_last_avail_idx(vdev, n, vring->last_avail_idx);
     virtio_queue_invalidate_signalled_used(vdev, n);
 
-    memory_region_unref(vring->mr);
+    memory_region_unref(vring->mr_desc);
+    memory_region_unref(vring->mr_avail);
+    memory_region_unref(vring->mr_used);
 }
 
 /* Disable guest->host notifies */

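The rewritten vring_setup() above maps the descriptor, avail, and used areas separately and unwinds partially completed mappings with staged goto labels. A self-contained sketch of that cleanup pattern (illustrative resources, not QEMU code):

/* Staged-cleanup sketch: each acquired resource gets its own error label, so a
 * failure at step N releases exactly the resources from steps 1..N-1. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name, bool fail)
{
    if (fail) {
        return NULL;
    }
    printf("mapped %s\n", name);
    return malloc(1);
}

static void release(const char *name, void *p)
{
    printf("unmapped %s\n", name);
    free(p);
}

static bool setup(bool fail_used)
{
    void *desc, *avail, *used;

    desc = acquire("desc", false);
    if (!desc) {
        goto out_err_desc;
    }
    avail = acquire("avail", false);
    if (!avail) {
        goto out_err_avail;
    }
    used = acquire("used", fail_used);
    if (!used) {
        goto out_err_used;
    }
    return true;

out_err_used:
    release("avail", avail);
out_err_avail:
    release("desc", desc);
out_err_desc:
    return false;
}

int main(void)
{
    setup(true);   /* failing the last step unwinds avail, then desc */
    return 0;
}

The labels appear in reverse acquisition order, mirroring the out_err_used/out_err_avail/out_err_desc chain in the hunk above.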
@@ -22,7 +22,9 @@
 #include "hw/virtio/virtio.h"
 
 typedef struct {
-    MemoryRegion *mr;               /* memory region containing the vring */
+    MemoryRegion *mr_desc;          /* memory region for the vring desc */
+    MemoryRegion *mr_avail;         /* memory region for the vring avail */
+    MemoryRegion *mr_used;          /* memory region for the vring used */
     struct vring vr;                /* virtqueue vring mapped to host memory */
     uint16_t last_avail_idx;        /* last processed avail ring index */
     uint16_t last_used_idx;         /* last processed used ring index */