hw: move virtio devices to hw/ subdirectories

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini 2013-02-05 12:42:31 +01:00
parent 49ab747f66
commit 6e7907468f
24 changed files with 15 additions and 7 deletions

hw/block/Makefile.objs

@@ -6,3 +6,6 @@ common-obj-$(CONFIG_PFLASH_CFI01) += pflash_cfi01.o
common-obj-$(CONFIG_PFLASH_CFI02) += pflash_cfi02.o
common-obj-$(CONFIG_XEN_BACKEND) += xen_disk.o
common-obj-$(CONFIG_ECC) += ecc.o
obj-$(CONFIG_VIRTIO) += virtio-blk.o
obj-$(CONFIG_VIRTIO_BLK_DATA_PLANE) += dataplane/

hw/block/dataplane/Makefile.objs (new file)

@@ -0,0 +1 @@
obj-y += ioq.o virtio-blk.o

hw/block/dataplane/ioq.c (new file)

@@ -0,0 +1,117 @@
/*
* Linux AIO request queue
*
* Copyright 2012 IBM, Corp.
* Copyright 2012 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Stefan Hajnoczi <stefanha@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "ioq.h"
void ioq_init(IOQueue *ioq, int fd, unsigned int max_reqs)
{
int rc;
ioq->fd = fd;
ioq->max_reqs = max_reqs;
memset(&ioq->io_ctx, 0, sizeof ioq->io_ctx);
rc = io_setup(max_reqs, &ioq->io_ctx);
if (rc != 0) {
fprintf(stderr, "ioq io_setup failed %d\n", rc);
exit(1);
}
rc = event_notifier_init(&ioq->io_notifier, 0);
if (rc != 0) {
fprintf(stderr, "ioq io event notifier creation failed %d\n", rc);
exit(1);
}
ioq->freelist = g_malloc0(sizeof ioq->freelist[0] * max_reqs);
ioq->freelist_idx = 0;
ioq->queue = g_malloc0(sizeof ioq->queue[0] * max_reqs);
ioq->queue_idx = 0;
}
void ioq_cleanup(IOQueue *ioq)
{
g_free(ioq->freelist);
g_free(ioq->queue);
event_notifier_cleanup(&ioq->io_notifier);
io_destroy(ioq->io_ctx);
}
EventNotifier *ioq_get_notifier(IOQueue *ioq)
{
return &ioq->io_notifier;
}
struct iocb *ioq_get_iocb(IOQueue *ioq)
{
/* Underflow cannot happen since ioq is sized for max_reqs */
assert(ioq->freelist_idx != 0);
struct iocb *iocb = ioq->freelist[--ioq->freelist_idx];
ioq->queue[ioq->queue_idx++] = iocb;
return iocb;
}
void ioq_put_iocb(IOQueue *ioq, struct iocb *iocb)
{
/* Overflow cannot happen since ioq is sized for max_reqs */
assert(ioq->freelist_idx != ioq->max_reqs);
ioq->freelist[ioq->freelist_idx++] = iocb;
}
struct iocb *ioq_rdwr(IOQueue *ioq, bool read, struct iovec *iov,
unsigned int count, long long offset)
{
struct iocb *iocb = ioq_get_iocb(ioq);
if (read) {
io_prep_preadv(iocb, ioq->fd, iov, count, offset);
} else {
io_prep_pwritev(iocb, ioq->fd, iov, count, offset);
}
io_set_eventfd(iocb, event_notifier_get_fd(&ioq->io_notifier));
return iocb;
}
int ioq_submit(IOQueue *ioq)
{
int rc = io_submit(ioq->io_ctx, ioq->queue_idx, ioq->queue);
ioq->queue_idx = 0; /* reset */
return rc;
}
int ioq_run_completion(IOQueue *ioq, IOQueueCompletion *completion,
void *opaque)
{
struct io_event events[ioq->max_reqs];
int nevents, i;
do {
nevents = io_getevents(ioq->io_ctx, 0, ioq->max_reqs, events, NULL);
} while (nevents < 0 && errno == EINTR);
if (nevents < 0) {
return nevents;
}
for (i = 0; i < nevents; i++) {
ssize_t ret = ((uint64_t)events[i].res2 << 32) | events[i].res;
completion(events[i].obj, ret, opaque);
ioq_put_iocb(ioq, events[i].obj);
}
return nevents;
}

hw/block/dataplane/ioq.h (new file)

@@ -0,0 +1,57 @@
/*
* Linux AIO request queue
*
* Copyright 2012 IBM, Corp.
* Copyright 2012 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Stefan Hajnoczi <stefanha@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef IOQ_H
#define IOQ_H
#include <libaio.h>
#include "qemu/event_notifier.h"
typedef struct {
int fd; /* file descriptor */
unsigned int max_reqs; /* max length of freelist and queue */
io_context_t io_ctx; /* Linux AIO context */
EventNotifier io_notifier; /* Linux AIO eventfd */
/* Requests can complete in any order so a free list is necessary to manage
* available iocbs.
*/
struct iocb **freelist; /* free iocbs */
unsigned int freelist_idx;
/* Multiple requests are queued up before submitting them all in one go */
struct iocb **queue; /* queued iocbs */
unsigned int queue_idx;
} IOQueue;
void ioq_init(IOQueue *ioq, int fd, unsigned int max_reqs);
void ioq_cleanup(IOQueue *ioq);
EventNotifier *ioq_get_notifier(IOQueue *ioq);
struct iocb *ioq_get_iocb(IOQueue *ioq);
void ioq_put_iocb(IOQueue *ioq, struct iocb *iocb);
struct iocb *ioq_rdwr(IOQueue *ioq, bool read, struct iovec *iov,
unsigned int count, long long offset);
int ioq_submit(IOQueue *ioq);
static inline unsigned int ioq_num_queued(IOQueue *ioq)
{
return ioq->queue_idx;
}
typedef void IOQueueCompletion(struct iocb *iocb, ssize_t ret, void *opaque);
int ioq_run_completion(IOQueue *ioq, IOQueueCompletion *completion,
void *opaque);
#endif /* IOQ_H */
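For orientation, here is a minimal usage sketch of the IOQueue API (hypothetical caller code, not part of this commit; MyRequest, my_completion and example are illustrative names). Note that the freelist starts empty, so the caller must seed it with its own embedded iocbs via ioq_put_iocb() before the first ioq_rdwr(), exactly as virtio_blk_data_plane_start() does below:

#include "ioq.h"

typedef struct {
    struct iocb iocb;   /* embedded so the completion can recover us via container_of() */
    /* ... per-request state ... */
} MyRequest;

static void my_completion(struct iocb *iocb, ssize_t ret, void *opaque)
{
    /* ret >= 0: bytes transferred; ret < 0: negative errno */
}

static void example(int fd, struct iovec *iov, unsigned int iov_cnt)
{
    static MyRequest reqs[128];  /* must outlive the in-flight I/O */
    IOQueue ioq;
    int i;

    ioq_init(&ioq, fd, 128);     /* fd should be open with O_DIRECT for Linux AIO */
    for (i = 0; i < 128; i++) {
        ioq_put_iocb(&ioq, &reqs[i].iocb);       /* seed the freelist */
    }
    ioq_rdwr(&ioq, true, iov, iov_cnt, 0);       /* queue a read at offset 0 */
    if (ioq_num_queued(&ioq) > 0) {
        ioq_submit(&ioq);                        /* one io_submit() for the batch */
    }
    /* ... later, when ioq_get_notifier(&ioq) becomes readable ... */
    ioq_run_completion(&ioq, my_completion, NULL);
    ioq_cleanup(&ioq);
}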

hw/block/dataplane/virtio-blk.c (new file)

@@ -0,0 +1,540 @@
/*
* Dedicated thread for virtio-blk I/O processing
*
* Copyright 2012 IBM, Corp.
* Copyright 2012 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Stefan Hajnoczi <stefanha@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "hw/virtio/dataplane/vring.h"
#include "ioq.h"
#include "migration/migration.h"
#include "block/block.h"
#include "hw/virtio/virtio-blk.h"
#include "virtio-blk.h"
#include "block/aio.h"
enum {
SEG_MAX = 126, /* maximum number of I/O segments */
VRING_MAX = SEG_MAX + 2, /* maximum number of vring descriptors */
REQ_MAX = VRING_MAX, /* maximum number of requests in the vring,
* is VRING_MAX / 2 with traditional and
* VRING_MAX with indirect descriptors */
};
typedef struct {
struct iocb iocb; /* Linux AIO control block */
QEMUIOVector *inhdr; /* iovecs for virtio_blk_inhdr */
unsigned int head; /* vring descriptor index */
struct iovec *bounce_iov; /* used if guest buffers are unaligned */
QEMUIOVector *read_qiov; /* for read completion w/ bounce buffer */
} VirtIOBlockRequest;
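/* complete_request() recovers this wrapper from the raw iocb pointer handed
 * back by Linux AIO via container_of(), which is why the iocb is embedded in
 * the request rather than allocated separately.
 */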
struct VirtIOBlockDataPlane {
bool started;
bool stopping;
QEMUBH *start_bh;
QemuThread thread;
VirtIOBlkConf *blk;
int fd; /* image file descriptor */
VirtIODevice *vdev;
Vring vring; /* virtqueue vring */
EventNotifier *guest_notifier; /* irq */
/* Note that these EventNotifiers are assigned by value. This is
* fine as long as you do not call event_notifier_cleanup on them
* (because you don't own the file descriptor or handle; you just
* use it).
*/
AioContext *ctx;
EventNotifier io_notifier; /* Linux AIO completion */
EventNotifier host_notifier; /* doorbell */
IOQueue ioqueue; /* Linux AIO queue (should really be per
dataplane thread) */
VirtIOBlockRequest requests[REQ_MAX]; /* pool of requests, managed by the
queue */
unsigned int num_reqs;
Error *migration_blocker;
};
/* Raise an interrupt to signal guest, if necessary */
static void notify_guest(VirtIOBlockDataPlane *s)
{
if (!vring_should_notify(s->vdev, &s->vring)) {
return;
}
event_notifier_set(s->guest_notifier);
}
static void complete_request(struct iocb *iocb, ssize_t ret, void *opaque)
{
VirtIOBlockDataPlane *s = opaque;
VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
struct virtio_blk_inhdr hdr;
int len;
if (likely(ret >= 0)) {
hdr.status = VIRTIO_BLK_S_OK;
len = ret;
} else {
hdr.status = VIRTIO_BLK_S_IOERR;
len = 0;
}
trace_virtio_blk_data_plane_complete_request(s, req->head, ret);
if (req->read_qiov) {
assert(req->bounce_iov);
qemu_iovec_from_buf(req->read_qiov, 0, req->bounce_iov->iov_base, len);
qemu_iovec_destroy(req->read_qiov);
g_slice_free(QEMUIOVector, req->read_qiov);
}
if (req->bounce_iov) {
qemu_vfree(req->bounce_iov->iov_base);
g_slice_free(struct iovec, req->bounce_iov);
}
qemu_iovec_from_buf(req->inhdr, 0, &hdr, sizeof(hdr));
qemu_iovec_destroy(req->inhdr);
g_slice_free(QEMUIOVector, req->inhdr);
/* According to the virtio specification len should be the number of bytes
* written to, but for virtio-blk it seems to be the number of bytes
* transferred plus the status bytes.
*/
vring_push(&s->vring, req->head, len + sizeof(hdr));
s->num_reqs--;
}
static void complete_request_early(VirtIOBlockDataPlane *s, unsigned int head,
QEMUIOVector *inhdr, unsigned char status)
{
struct virtio_blk_inhdr hdr = {
.status = status,
};
qemu_iovec_from_buf(inhdr, 0, &hdr, sizeof(hdr));
qemu_iovec_destroy(inhdr);
g_slice_free(QEMUIOVector, inhdr);
vring_push(&s->vring, head, sizeof(hdr));
notify_guest(s);
}
/* Get disk serial number */
static void do_get_id_cmd(VirtIOBlockDataPlane *s,
struct iovec *iov, unsigned int iov_cnt,
unsigned int head, QEMUIOVector *inhdr)
{
char id[VIRTIO_BLK_ID_BYTES];
/* Serial number is NUL-terminated only when shorter than the buffer */
strncpy(id, s->blk->serial ? s->blk->serial : "", sizeof(id));
iov_from_buf(iov, iov_cnt, 0, id, sizeof(id));
complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
}
static int do_rdwr_cmd(VirtIOBlockDataPlane *s, bool read,
struct iovec *iov, unsigned int iov_cnt,
long long offset, unsigned int head,
QEMUIOVector *inhdr)
{
struct iocb *iocb;
QEMUIOVector qiov;
struct iovec *bounce_iov = NULL;
QEMUIOVector *read_qiov = NULL;
qemu_iovec_init_external(&qiov, iov, iov_cnt);
if (!bdrv_qiov_is_aligned(s->blk->conf.bs, &qiov)) {
void *bounce_buffer = qemu_blockalign(s->blk->conf.bs, qiov.size);
if (read) {
/* Need to copy back from bounce buffer on completion */
read_qiov = g_slice_new(QEMUIOVector);
qemu_iovec_init(read_qiov, iov_cnt);
qemu_iovec_concat_iov(read_qiov, iov, iov_cnt, 0, qiov.size);
} else {
qemu_iovec_to_buf(&qiov, 0, bounce_buffer, qiov.size);
}
/* Redirect I/O to aligned bounce buffer */
bounce_iov = g_slice_new(struct iovec);
bounce_iov->iov_base = bounce_buffer;
bounce_iov->iov_len = qiov.size;
iov = bounce_iov;
iov_cnt = 1;
}
iocb = ioq_rdwr(&s->ioqueue, read, iov, iov_cnt, offset);
/* Fill in virtio block metadata needed for completion */
VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
req->head = head;
req->inhdr = inhdr;
req->bounce_iov = bounce_iov;
req->read_qiov = read_qiov;
return 0;
}
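/* Note the asymmetry in the bounce-buffer handling above: writes are copied
 * into the bounce buffer before submission, while reads only record a
 * read_qiov describing the guest buffers so that complete_request() can copy
 * the data back out of the bounce buffer once the I/O finishes.
 */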
static int process_request(IOQueue *ioq, struct iovec iov[],
unsigned int out_num, unsigned int in_num,
unsigned int head)
{
VirtIOBlockDataPlane *s = container_of(ioq, VirtIOBlockDataPlane, ioqueue);
struct iovec *in_iov = &iov[out_num];
struct virtio_blk_outhdr outhdr;
QEMUIOVector *inhdr;
size_t in_size;
/* Copy in outhdr */
if (unlikely(iov_to_buf(iov, out_num, 0, &outhdr,
sizeof(outhdr)) != sizeof(outhdr))) {
error_report("virtio-blk request outhdr too short");
return -EFAULT;
}
iov_discard_front(&iov, &out_num, sizeof(outhdr));
/* Grab inhdr for later */
in_size = iov_size(in_iov, in_num);
if (in_size < sizeof(struct virtio_blk_inhdr)) {
error_report("virtio_blk request inhdr too short");
return -EFAULT;
}
inhdr = g_slice_new(QEMUIOVector);
qemu_iovec_init(inhdr, 1);
qemu_iovec_concat_iov(inhdr, in_iov, in_num,
in_size - sizeof(struct virtio_blk_inhdr),
sizeof(struct virtio_blk_inhdr));
iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));
/* TODO Linux sets the barrier bit even when not advertised! */
outhdr.type &= ~VIRTIO_BLK_T_BARRIER;
switch (outhdr.type) {
case VIRTIO_BLK_T_IN:
do_rdwr_cmd(s, true, in_iov, in_num, outhdr.sector * 512, head, inhdr);
return 0;
case VIRTIO_BLK_T_OUT:
do_rdwr_cmd(s, false, iov, out_num, outhdr.sector * 512, head, inhdr);
return 0;
case VIRTIO_BLK_T_SCSI_CMD:
/* TODO support SCSI commands */
complete_request_early(s, head, inhdr, VIRTIO_BLK_S_UNSUPP);
return 0;
case VIRTIO_BLK_T_FLUSH:
/* TODO fdsync not supported by Linux AIO, do it synchronously here! */
if (qemu_fdatasync(s->fd) < 0) {
complete_request_early(s, head, inhdr, VIRTIO_BLK_S_IOERR);
} else {
complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
}
return 0;
case VIRTIO_BLK_T_GET_ID:
do_get_id_cmd(s, in_iov, in_num, head, inhdr);
return 0;
default:
error_report("virtio-blk unsupported request type %#x", outhdr.type);
qemu_iovec_destroy(inhdr);
g_slice_free(QEMUIOVector, inhdr);
return -EFAULT;
}
}
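/* For reference, the guest-visible request layout parsed above:
 *
 *   out iovecs: [ struct virtio_blk_outhdr ][ write payload ... ]
 *   in  iovecs: [ read payload ... ][ struct virtio_blk_inhdr ]
 *
 * iov_discard_front()/iov_discard_back() strip the two headers so that only
 * payload iovecs reach do_rdwr_cmd().
 */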
static int flush_true(EventNotifier *e)
{
return true;
}
static void handle_notify(EventNotifier *e)
{
VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane,
host_notifier);
/* There is one array of iovecs into which all new requests are extracted
* from the vring. Requests are read from the vring and the translated
* descriptors are written to the iovecs array. The iovecs do not have to
* persist across handle_notify() calls because the kernel copies the
* iovecs on io_submit().
*
* Handling io_submit() EAGAIN may require storing the requests across
* handle_notify() calls until the kernel has sufficient resources to
* accept more I/O. This is not implemented yet.
*/
struct iovec iovec[VRING_MAX];
struct iovec *end = &iovec[VRING_MAX];
struct iovec *iov = iovec;
/* When a request is read from the vring, the index of the first descriptor
* (aka head) is returned so that the completed request can be pushed onto
* the vring later.
*
* The number of hypervisor read-only iovecs is out_num. The number of
* hypervisor write-only iovecs is in_num.
*/
int head;
unsigned int out_num = 0, in_num = 0;
unsigned int num_queued;
event_notifier_test_and_clear(&s->host_notifier);
for (;;) {
/* Disable guest->host notifies to avoid unnecessary vmexits */
vring_disable_notification(s->vdev, &s->vring);
for (;;) {
head = vring_pop(s->vdev, &s->vring, iov, end, &out_num, &in_num);
if (head < 0) {
break; /* no more requests */
}
trace_virtio_blk_data_plane_process_request(s, out_num, in_num,
head);
if (process_request(&s->ioqueue, iov, out_num, in_num, head) < 0) {
vring_set_broken(&s->vring);
break;
}
iov += out_num + in_num;
}
if (likely(head == -EAGAIN)) { /* vring emptied */
/* Re-enable guest->host notifies and stop processing the vring.
* But if the guest has snuck in more descriptors, keep processing.
*/
if (vring_enable_notification(s->vdev, &s->vring)) {
break;
}
} else { /* head == -ENOBUFS or fatal error, iovecs[] is depleted */
/* Since there are no iovecs[] left, stop processing for now. Do
* not re-enable guest->host notifies since the I/O completion
* handler knows to check for more vring descriptors anyway.
*/
break;
}
}
num_queued = ioq_num_queued(&s->ioqueue);
if (num_queued > 0) {
s->num_reqs += num_queued;
int rc = ioq_submit(&s->ioqueue);
if (unlikely(rc < 0)) {
fprintf(stderr, "ioq_submit failed %d\n", rc);
exit(1);
}
}
}
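/* The nested loops above implement the usual virtio event-suppression
 * pattern; in outline (hypothetical helper names):
 *
 *   for (;;) {
 *       disable_notifications();
 *       while (pop_request() succeeds) {
 *           process_request();
 *       }
 *       if (enable_notifications()) {
 *           break;    // ring still empty after re-enabling, safe to wait
 *       }
 *       // a request arrived between the last pop and re-enabling; go again
 *   }
 */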
static int flush_io(EventNotifier *e)
{
VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane,
io_notifier);
return s->num_reqs > 0;
}
static void handle_io(EventNotifier *e)
{
VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane,
io_notifier);
event_notifier_test_and_clear(&s->io_notifier);
if (ioq_run_completion(&s->ioqueue, complete_request, s) > 0) {
notify_guest(s);
}
/* If there were more requests than iovecs, the vring will not be empty yet
* so check again. There should now be enough resources to process more
* requests.
*/
if (unlikely(vring_more_avail(&s->vring))) {
handle_notify(&s->host_notifier);
}
}
static void *data_plane_thread(void *opaque)
{
VirtIOBlockDataPlane *s = opaque;
do {
aio_poll(s->ctx, true);
} while (!s->stopping || s->num_reqs > 0);
return NULL;
}
static void start_data_plane_bh(void *opaque)
{
VirtIOBlockDataPlane *s = opaque;
qemu_bh_delete(s->start_bh);
s->start_bh = NULL;
qemu_thread_create(&s->thread, data_plane_thread,
s, QEMU_THREAD_JOINABLE);
}
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
VirtIOBlockDataPlane **dataplane)
{
VirtIOBlockDataPlane *s;
int fd;
*dataplane = NULL;
if (!blk->data_plane) {
return true;
}
if (blk->scsi) {
error_report("device is incompatible with x-data-plane, use scsi=off");
return false;
}
if (blk->config_wce) {
error_report("device is incompatible with x-data-plane, "
"use config-wce=off");
return false;
}
fd = raw_get_aio_fd(blk->conf.bs);
if (fd < 0) {
error_report("drive is incompatible with x-data-plane, "
"use format=raw,cache=none,aio=native");
return false;
}
s = g_new0(VirtIOBlockDataPlane, 1);
s->vdev = vdev;
s->fd = fd;
s->blk = blk;
/* Prevent block operations that conflict with data plane thread */
bdrv_set_in_use(blk->conf.bs, 1);
error_setg(&s->migration_blocker,
"x-data-plane does not support migration");
migrate_add_blocker(s->migration_blocker);
*dataplane = s;
return true;
}
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
if (!s) {
return;
}
virtio_blk_data_plane_stop(s);
migrate_del_blocker(s->migration_blocker);
error_free(s->migration_blocker);
bdrv_set_in_use(s->blk->conf.bs, 0);
g_free(s);
}
void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
{
VirtQueue *vq;
int i;
if (s->started) {
return;
}
vq = virtio_get_queue(s->vdev, 0);
if (!vring_setup(&s->vring, s->vdev, 0)) {
return;
}
s->ctx = aio_context_new();
/* Set up guest notifier (irq) */
if (s->vdev->binding->set_guest_notifiers(s->vdev->binding_opaque, 1,
true) != 0) {
fprintf(stderr, "virtio-blk failed to set guest notifier, "
"ensure -enable-kvm is set\n");
exit(1);
}
s->guest_notifier = virtio_queue_get_guest_notifier(vq);
/* Set up virtqueue notify */
if (s->vdev->binding->set_host_notifier(s->vdev->binding_opaque,
0, true) != 0) {
fprintf(stderr, "virtio-blk failed to set host notifier\n");
exit(1);
}
s->host_notifier = *virtio_queue_get_host_notifier(vq);
aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify, flush_true);
/* Set up ioqueue */
ioq_init(&s->ioqueue, s->fd, REQ_MAX);
for (i = 0; i < ARRAY_SIZE(s->requests); i++) {
ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb);
}
s->io_notifier = *ioq_get_notifier(&s->ioqueue);
aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io, flush_io);
s->started = true;
trace_virtio_blk_data_plane_start(s);
/* Kick right away to begin processing requests already in vring */
event_notifier_set(virtio_queue_get_host_notifier(vq));
/* Spawn thread in BH so it inherits iothread cpusets */
s->start_bh = qemu_bh_new(start_data_plane_bh, s);
qemu_bh_schedule(s->start_bh);
}
void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
{
if (!s->started || s->stopping) {
return;
}
s->stopping = true;
trace_virtio_blk_data_plane_stop(s);
/* Stop thread or cancel pending thread creation BH */
if (s->start_bh) {
qemu_bh_delete(s->start_bh);
s->start_bh = NULL;
} else {
aio_notify(s->ctx);
qemu_thread_join(&s->thread);
}
aio_set_event_notifier(s->ctx, &s->io_notifier, NULL, NULL);
ioq_cleanup(&s->ioqueue);
aio_set_event_notifier(s->ctx, &s->host_notifier, NULL, NULL);
s->vdev->binding->set_host_notifier(s->vdev->binding_opaque, 0, false);
aio_context_unref(s->ctx);
/* Clean up guest notifier (irq) */
s->vdev->binding->set_guest_notifiers(s->vdev->binding_opaque, 1, false);
vring_teardown(&s->vring);
s->started = false;
s->stopping = false;
}

hw/block/dataplane/virtio-blk.h (new file)

@@ -0,0 +1,29 @@
/*
* Dedicated thread for virtio-blk I/O processing
*
* Copyright 2012 IBM, Corp.
* Copyright 2012 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Stefan Hajnoczi <stefanha@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef HW_DATAPLANE_VIRTIO_BLK_H
#define HW_DATAPLANE_VIRTIO_BLK_H
#include "hw/virtio/virtio.h"
typedef struct VirtIOBlockDataPlane VirtIOBlockDataPlane;
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
VirtIOBlockDataPlane **dataplane);
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_drain(VirtIOBlockDataPlane *s);
#endif /* HW_DATAPLANE_VIRTIO_BLK_H */
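A sketch of the intended call sequence (this mirrors how hw/block/virtio-blk.c below drives the API; vdev and blk come from the caller, error handling elided):

VirtIOBlockDataPlane *dataplane;

if (!virtio_blk_data_plane_create(vdev, blk, &dataplane)) {
    /* configuration is incompatible with x-data-plane: fail device init */
}
if (dataplane) {
    virtio_blk_data_plane_start(dataplane);  /* on first kick or DRIVER_OK */
    /* ... guest I/O is serviced by the dataplane thread ... */
    virtio_blk_data_plane_stop(dataplane);   /* on reset or status change */
}
virtio_blk_data_plane_destroy(dataplane);    /* on device teardown; NULL-safe */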

hw/block/virtio-blk.c (new file)

@@ -0,0 +1,732 @@
/*
* Virtio Block Device
*
* Copyright IBM, Corp. 2007
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/block/block.h"
#include "sysemu/blockdev.h"
#include "hw/virtio/virtio-blk.h"
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
# include "dataplane/virtio-blk.h"
#endif
#include "block/scsi.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
typedef struct VirtIOBlockReq
{
VirtIOBlock *dev;
VirtQueueElement elem;
struct virtio_blk_inhdr *in;
struct virtio_blk_outhdr *out;
struct virtio_scsi_inhdr *scsi;
QEMUIOVector qiov;
struct VirtIOBlockReq *next;
BlockAcctCookie acct;
} VirtIOBlockReq;
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
VirtIOBlock *s = req->dev;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
trace_virtio_blk_req_complete(req, status);
stb_p(&req->in->status, status);
virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
virtio_notify(vdev, s->vq);
}
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
bool is_read)
{
BlockErrorAction action = bdrv_get_error_action(req->dev->bs, is_read, error);
VirtIOBlock *s = req->dev;
if (action == BDRV_ACTION_STOP) {
req->next = s->rq;
s->rq = req;
} else if (action == BDRV_ACTION_REPORT) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
bdrv_acct_done(s->bs, &req->acct);
g_free(req);
}
bdrv_error_action(s->bs, action, is_read, error);
return action != BDRV_ACTION_IGNORE;
}
static void virtio_blk_rw_complete(void *opaque, int ret)
{
VirtIOBlockReq *req = opaque;
trace_virtio_blk_rw_complete(req, ret);
if (ret) {
bool is_read = !(ldl_p(&req->out->type) & VIRTIO_BLK_T_OUT);
if (virtio_blk_handle_rw_error(req, -ret, is_read))
return;
}
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
bdrv_acct_done(req->dev->bs, &req->acct);
g_free(req);
}
static void virtio_blk_flush_complete(void *opaque, int ret)
{
VirtIOBlockReq *req = opaque;
if (ret) {
if (virtio_blk_handle_rw_error(req, -ret, 0)) {
return;
}
}
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
bdrv_acct_done(req->dev->bs, &req->acct);
g_free(req);
}
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
VirtIOBlockReq *req = g_malloc(sizeof(*req));
req->dev = s;
req->qiov.size = 0;
req->next = NULL;
return req;
}
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
VirtIOBlockReq *req = virtio_blk_alloc_request(s);
if (req != NULL) {
if (!virtqueue_pop(s->vq, &req->elem)) {
g_free(req);
return NULL;
}
}
return req;
}
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
#ifdef __linux__
int ret;
int i;
#endif
int status = VIRTIO_BLK_S_OK;
/*
* We require at least one output segment each for the virtio_blk_outhdr
* and the SCSI command block.
*
* We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
* and the sense buffer pointer in the input segments.
*/
if (req->elem.out_num < 2 || req->elem.in_num < 3) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
g_free(req);
return;
}
/*
* The scsi inhdr is placed in the second-to-last input segment, just
* before the regular inhdr.
*/
req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
if (!req->dev->blk.scsi) {
status = VIRTIO_BLK_S_UNSUPP;
goto fail;
}
/*
* No support for bidirectional commands yet.
*/
if (req->elem.out_num > 2 && req->elem.in_num > 3) {
status = VIRTIO_BLK_S_UNSUPP;
goto fail;
}
#ifdef __linux__
struct sg_io_hdr hdr;
memset(&hdr, 0, sizeof(struct sg_io_hdr));
hdr.interface_id = 'S';
hdr.cmd_len = req->elem.out_sg[1].iov_len;
hdr.cmdp = req->elem.out_sg[1].iov_base;
hdr.dxfer_len = 0;
if (req->elem.out_num > 2) {
/*
* If there are more than the minimally required 2 output segments
* there is write payload starting from the third iovec.
*/
hdr.dxfer_direction = SG_DXFER_TO_DEV;
hdr.iovec_count = req->elem.out_num - 2;
for (i = 0; i < hdr.iovec_count; i++)
hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;
hdr.dxferp = req->elem.out_sg + 2;
} else if (req->elem.in_num > 3) {
/*
* If we have more than 3 input segments the guest wants to actually
* read data.
*/
hdr.dxfer_direction = SG_DXFER_FROM_DEV;
hdr.iovec_count = req->elem.in_num - 3;
for (i = 0; i < hdr.iovec_count; i++)
hdr.dxfer_len += req->elem.in_sg[i].iov_len;
hdr.dxferp = req->elem.in_sg;
} else {
/*
* Some SCSI commands don't actually transfer any data.
*/
hdr.dxfer_direction = SG_DXFER_NONE;
}
hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;
ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
if (ret) {
status = VIRTIO_BLK_S_UNSUPP;
goto fail;
}
/*
* From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
* clear the masked_status field [hence status gets cleared too, see
* block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
* status has occurred. However they do set DRIVER_SENSE in driver_status
* field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
*/
if (hdr.status == 0 && hdr.sb_len_wr > 0) {
hdr.status = CHECK_CONDITION;
}
stl_p(&req->scsi->errors,
hdr.status | (hdr.msg_status << 8) |
(hdr.host_status << 16) | (hdr.driver_status << 24));
stl_p(&req->scsi->residual, hdr.resid);
stl_p(&req->scsi->sense_len, hdr.sb_len_wr);
stl_p(&req->scsi->data_len, hdr.dxfer_len);
virtio_blk_req_complete(req, status);
g_free(req);
return;
#else
abort();
#endif
fail:
/* Just put anything nonzero so that the ioctl fails in the guest. */
stl_p(&req->scsi->errors, 255);
virtio_blk_req_complete(req, status);
g_free(req);
}
typedef struct MultiReqBuffer {
BlockRequest blkreq[32];
unsigned int num_writes;
} MultiReqBuffer;
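/* Pending writes are coalesced into this fixed 32-slot buffer and issued
 * with a single bdrv_aio_multiwrite() call: when the buffer fills up (see
 * virtio_blk_handle_write), when a flush must order them, or at the end of
 * a virtqueue batch in virtio_blk_handle_output().
 */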
static void virtio_submit_multiwrite(BlockDriverState *bs, MultiReqBuffer *mrb)
{
int i, ret;
if (!mrb->num_writes) {
return;
}
ret = bdrv_aio_multiwrite(bs, mrb->blkreq, mrb->num_writes);
if (ret != 0) {
for (i = 0; i < mrb->num_writes; i++) {
if (mrb->blkreq[i].error) {
virtio_blk_rw_complete(mrb->blkreq[i].opaque, -EIO);
}
}
}
mrb->num_writes = 0;
}
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);
/*
* Make sure all outstanding writes are posted to the backing device.
*/
virtio_submit_multiwrite(req->dev->bs, mrb);
bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
}
static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
BlockRequest *blkreq;
uint64_t sector;
sector = ldq_p(&req->out->sector);
bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);
trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);
if (sector & req->dev->sector_mask) {
virtio_blk_rw_complete(req, -EIO);
return;
}
if (req->qiov.size % req->dev->conf->logical_block_size) {
virtio_blk_rw_complete(req, -EIO);
return;
}
if (mrb->num_writes == 32) {
virtio_submit_multiwrite(req->dev->bs, mrb);
}
blkreq = &mrb->blkreq[mrb->num_writes];
blkreq->sector = sector;
blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
blkreq->qiov = &req->qiov;
blkreq->cb = virtio_blk_rw_complete;
blkreq->opaque = req;
blkreq->error = 0;
mrb->num_writes++;
}
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
uint64_t sector;
sector = ldq_p(&req->out->sector);
bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);
trace_virtio_blk_handle_read(req, sector, req->qiov.size / 512);
if (sector & req->dev->sector_mask) {
virtio_blk_rw_complete(req, -EIO);
return;
}
if (req->qiov.size % req->dev->conf->logical_block_size) {
virtio_blk_rw_complete(req, -EIO);
return;
}
bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
req->qiov.size / BDRV_SECTOR_SIZE,
virtio_blk_rw_complete, req);
}
static void virtio_blk_handle_request(VirtIOBlockReq *req,
MultiReqBuffer *mrb)
{
uint32_t type;
if (req->elem.out_num < 1 || req->elem.in_num < 1) {
error_report("virtio-blk missing headers");
exit(1);
}
if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
error_report("virtio-blk header not in correct element");
exit(1);
}
req->out = (void *)req->elem.out_sg[0].iov_base;
req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;
type = ldl_p(&req->out->type);
if (type & VIRTIO_BLK_T_FLUSH) {
virtio_blk_handle_flush(req, mrb);
} else if (type & VIRTIO_BLK_T_SCSI_CMD) {
virtio_blk_handle_scsi(req);
} else if (type & VIRTIO_BLK_T_GET_ID) {
VirtIOBlock *s = req->dev;
/*
* NB: per existing s/n string convention the string is
* terminated by '\0' only when shorter than buffer.
*/
strncpy(req->elem.in_sg[0].iov_base,
s->blk.serial ? s->blk.serial : "",
MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
g_free(req);
} else if (type & VIRTIO_BLK_T_OUT) {
qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
req->elem.out_num - 1);
virtio_blk_handle_write(req, mrb);
} else if (type == VIRTIO_BLK_T_IN || type == VIRTIO_BLK_T_BARRIER) {
/* VIRTIO_BLK_T_IN is 0, so we can't just & it. */
qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
req->elem.in_num - 1);
virtio_blk_handle_read(req);
} else {
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
g_free(req);
}
}
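/* The dispatch above bit-tests the type field except for reads, because
 * VIRTIO_BLK_T_IN is 0 and (type & VIRTIO_BLK_T_IN) is always false. The
 * request type values (from the virtio specification):
 *
 *   VIRTIO_BLK_T_IN        0x00000000
 *   VIRTIO_BLK_T_OUT       0x00000001
 *   VIRTIO_BLK_T_SCSI_CMD  0x00000002
 *   VIRTIO_BLK_T_FLUSH     0x00000004
 *   VIRTIO_BLK_T_GET_ID    0x00000008
 *   VIRTIO_BLK_T_BARRIER   0x80000000
 */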
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
VirtIOBlockReq *req;
MultiReqBuffer mrb = {
.num_writes = 0,
};
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
/* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
* dataplane here instead of waiting for .set_status().
*/
if (s->dataplane) {
virtio_blk_data_plane_start(s->dataplane);
return;
}
#endif
while ((req = virtio_blk_get_request(s))) {
virtio_blk_handle_request(req, &mrb);
}
virtio_submit_multiwrite(s->bs, &mrb);
/*
* FIXME: Want to check for completions before returning to guest mode,
* so cached reads and writes are reported as quickly as possible. But
* that should be done in the generic block layer.
*/
}
static void virtio_blk_dma_restart_bh(void *opaque)
{
VirtIOBlock *s = opaque;
VirtIOBlockReq *req = s->rq;
MultiReqBuffer mrb = {
.num_writes = 0,
};
qemu_bh_delete(s->bh);
s->bh = NULL;
s->rq = NULL;
while (req) {
virtio_blk_handle_request(req, &mrb);
req = req->next;
}
virtio_submit_multiwrite(s->bs, &mrb);
}
static void virtio_blk_dma_restart_cb(void *opaque, int running,
RunState state)
{
VirtIOBlock *s = opaque;
if (!running) {
return;
}
if (!s->bh) {
s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
qemu_bh_schedule(s->bh);
}
}
static void virtio_blk_reset(VirtIODevice *vdev)
{
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
VirtIOBlock *s = VIRTIO_BLK(vdev);
if (s->dataplane) {
virtio_blk_data_plane_stop(s->dataplane);
}
#endif
/*
* This should cancel pending requests, but that can't be done nicely
* until there are per-device request lists.
*/
bdrv_drain_all();
}
/* coalesce internal state, copy to pci i/o region 0 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
struct virtio_blk_config blkcfg;
uint64_t capacity;
int blk_size = s->conf->logical_block_size;
bdrv_get_geometry(s->bs, &capacity);
memset(&blkcfg, 0, sizeof(blkcfg));
stq_raw(&blkcfg.capacity, capacity);
stl_raw(&blkcfg.seg_max, 128 - 2);
stw_raw(&blkcfg.cylinders, s->conf->cyls);
stl_raw(&blkcfg.blk_size, blk_size);
stw_raw(&blkcfg.min_io_size, s->conf->min_io_size / blk_size);
stw_raw(&blkcfg.opt_io_size, s->conf->opt_io_size / blk_size);
blkcfg.heads = s->conf->heads;
/*
* We must ensure that the block device capacity is a multiple of
* the logical block size. If that is not the case, let's use
* sector_mask to adapt the geometry to have a correct picture.
* For example, with a 4096-byte logical block size, sector_mask is 7
* (4096 / 512 - 1) and secs is rounded down to a multiple of 8.
* For those devices where the capacity is ok for the given geometry
* we don't touch the sector value of the geometry, since some devices
* (like s390 dasd) need a specific value. Here the capacity is already
* cyls*heads*secs*blk_size and the sector value is not block size
* divided by 512 - instead it is the number of blk_size blocks
* per track (cylinder).
*/
if (bdrv_getlength(s->bs) / s->conf->heads / s->conf->secs % blk_size) {
blkcfg.sectors = s->conf->secs & ~s->sector_mask;
} else {
blkcfg.sectors = s->conf->secs;
}
blkcfg.size_max = 0;
blkcfg.physical_block_exp = get_physical_block_exp(s->conf);
blkcfg.alignment_offset = 0;
blkcfg.wce = bdrv_enable_write_cache(s->bs);
memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}
static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
struct virtio_blk_config blkcfg;
memcpy(&blkcfg, config, sizeof(blkcfg));
bdrv_set_enable_write_cache(s->bs, blkcfg.wce != 0);
}
static uint32_t virtio_blk_get_features(VirtIODevice *vdev, uint32_t features)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
features |= (1 << VIRTIO_BLK_F_SEG_MAX);
features |= (1 << VIRTIO_BLK_F_GEOMETRY);
features |= (1 << VIRTIO_BLK_F_TOPOLOGY);
features |= (1 << VIRTIO_BLK_F_BLK_SIZE);
features |= (1 << VIRTIO_BLK_F_SCSI);
if (s->blk.config_wce) {
features |= (1 << VIRTIO_BLK_F_CONFIG_WCE);
}
if (bdrv_enable_write_cache(s->bs))
features |= (1 << VIRTIO_BLK_F_WCE);
if (bdrv_is_read_only(s->bs))
features |= 1 << VIRTIO_BLK_F_RO;
return features;
}
static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
uint32_t features;
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
VIRTIO_CONFIG_S_DRIVER_OK))) {
virtio_blk_data_plane_stop(s->dataplane);
}
#endif
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
return;
}
features = vdev->guest_features;
bdrv_set_enable_write_cache(s->bs, !!(features & (1 << VIRTIO_BLK_F_WCE)));
}
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
VirtIOBlock *s = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
VirtIOBlockReq *req = s->rq;
virtio_save(vdev, f);
while (req) {
qemu_put_sbyte(f, 1);
qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
req = req->next;
}
qemu_put_sbyte(f, 0);
}
static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
VirtIOBlock *s = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
int ret;
if (version_id != 2)
return -EINVAL;
ret = virtio_load(vdev, f);
if (ret) {
return ret;
}
while (qemu_get_sbyte(f)) {
VirtIOBlockReq *req = virtio_blk_alloc_request(s);
qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
req->next = s->rq;
s->rq = req;
virtqueue_map_sg(req->elem.in_sg, req->elem.in_addr,
req->elem.in_num, 1);
virtqueue_map_sg(req->elem.out_sg, req->elem.out_addr,
req->elem.out_num, 0);
}
return 0;
}
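/* The stream format mirrored by virtio_blk_save() and virtio_blk_load():
 * the virtio core state, then a byte of 1 followed by the raw
 * VirtQueueElement for each in-flight request, terminated by a byte of 0.
 * virtqueue_map_sg() re-establishes the guest memory mappings, which are
 * not valid across migration.
 */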
static void virtio_blk_resize(void *opaque)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
virtio_notify_config(vdev);
}
static const BlockDevOps virtio_block_ops = {
.resize_cb = virtio_blk_resize,
};
void virtio_blk_set_conf(DeviceState *dev, VirtIOBlkConf *blk)
{
VirtIOBlock *s = VIRTIO_BLK(dev);
memcpy(&(s->blk), blk, sizeof(struct VirtIOBlkConf));
}
static int virtio_blk_device_init(VirtIODevice *vdev)
{
DeviceState *qdev = DEVICE(vdev);
VirtIOBlock *s = VIRTIO_BLK(vdev);
VirtIOBlkConf *blk = &(s->blk);
static int virtio_blk_id;
if (!blk->conf.bs) {
error_report("drive property not set");
return -1;
}
if (!bdrv_is_inserted(blk->conf.bs)) {
error_report("Device needs media, but drive is empty");
return -1;
}
blkconf_serial(&blk->conf, &blk->serial);
if (blkconf_geometry(&blk->conf, NULL, 65535, 255, 255) < 0) {
return -1;
}
virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
sizeof(struct virtio_blk_config));
vdev->get_config = virtio_blk_update_config;
vdev->set_config = virtio_blk_set_config;
vdev->get_features = virtio_blk_get_features;
vdev->set_status = virtio_blk_set_status;
vdev->reset = virtio_blk_reset;
s->bs = blk->conf.bs;
s->conf = &blk->conf;
memcpy(&(s->blk), blk, sizeof(struct VirtIOBlkConf));
s->rq = NULL;
s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;
s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
if (!virtio_blk_data_plane_create(vdev, blk, &s->dataplane)) {
virtio_common_cleanup(vdev);
return -1;
}
#endif
s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
register_savevm(qdev, "virtio-blk", virtio_blk_id++, 2,
virtio_blk_save, virtio_blk_load, s);
bdrv_set_dev_ops(s->bs, &virtio_block_ops, s);
bdrv_set_buffer_alignment(s->bs, s->conf->logical_block_size);
bdrv_iostatus_enable(s->bs);
add_boot_device_path(s->conf->bootindex, qdev, "/disk@0,0");
return 0;
}
static int virtio_blk_device_exit(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOBlock *s = VIRTIO_BLK(dev);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
virtio_blk_data_plane_destroy(s->dataplane);
s->dataplane = NULL;
#endif
qemu_del_vm_change_state_handler(s->change);
unregister_savevm(dev, "virtio-blk", s);
blockdev_mark_auto_del(s->bs);
virtio_common_cleanup(vdev);
return 0;
}
static Property virtio_blk_properties[] = {
DEFINE_VIRTIO_BLK_PROPERTIES(VirtIOBlock, blk),
DEFINE_PROP_END_OF_LIST(),
};
static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
dc->exit = virtio_blk_device_exit;
dc->props = virtio_blk_properties;
vdc->init = virtio_blk_device_init;
vdc->get_config = virtio_blk_update_config;
vdc->set_config = virtio_blk_set_config;
vdc->get_features = virtio_blk_get_features;
vdc->set_status = virtio_blk_set_status;
vdc->reset = virtio_blk_reset;
}
static const TypeInfo virtio_device_info = {
.name = TYPE_VIRTIO_BLK,
.parent = TYPE_VIRTIO_DEVICE,
.instance_size = sizeof(VirtIOBlock),
.class_init = virtio_blk_class_init,
};
static void virtio_register_types(void)
{
type_register_static(&virtio_device_info);
}
type_init(virtio_register_types)