Merge remote-tracking branch 'stefanha/block' into staging

* stefanha/block:
  commit: Remove unused check
  qemu-iotests: Update test cases for commit active
  commit: Support commit active layer
  block: Add commit_active_start()
  mirror: Move base to MirrorBlockJob
  mirror: Don't close target
  qemu-iotests: drop duplicate virtio-blk initialization failure
  vmdk: Allow vmdk_create to work with protocol
  vmdk: Check VMFS extent line field number
  docs: updated qemu-img man page and qemu-doc to reflect VHDX support.
  block: vhdx - improve error message, and .bdrv_check implementation
  block/iscsi: Fix compilation for libiscsi 1.4.0 (API change)
  qapi-schema: fix QEMU 1.8 references
  dataplane: replace hostmem with memory_region_find
  dataplane: change vring API to use VirtQueueElement
  vring: factor common code for error exits
  vring: create a common function to parse descriptors
  sheepdog: fix dynamic grow for running qcow2 format

Message-id: 1387554416-5837-1-git-send-email-stefanha@redhat.com
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
Commit debe40fbc5, committed by Anthony Liguori on 2014-01-10 11:05:05 -08:00
24 changed files with 535 additions and 541 deletions
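
The headline change in this series is the dataplane vring API rework ("dataplane: change vring API to use VirtQueueElement"): vring_pop() now hands back a heap-allocated VirtQueueElement instead of filling caller-provided iovec arrays, and vring_push() consumes (and frees) that element. A minimal caller sketch, assuming only the signatures visible in the vring.c diff below; handle_notify_sketch() and process_request() are hypothetical names, not part of this merge:

#include <sys/uio.h>
#include "hw/virtio/dataplane/vring.h"

/* Hypothetical device-specific request handler, defined elsewhere. */
static int process_request(struct iovec *out_sg, unsigned int out_num,
                           struct iovec *in_sg, unsigned int in_num);

static void handle_notify_sketch(VirtIODevice *vdev, Vring *vring)
{
    VirtQueueElement *elem;
    int len;

    for (;;) {
        /* vring_pop() now allocates the VirtQueueElement itself; a negative
         * return means no work (-EAGAIN) or a broken ring (-EFAULT). */
        if (vring_pop(vdev, vring, &elem) < 0) {
            break;
        }

        /* elem->out_sg/out_num and elem->in_sg/in_num replace the old
         * iov[]/out_num/in_num arguments. */
        len = process_request(elem->out_sg, elem->out_num,
                              elem->in_sg, elem->in_num);

        /* vring_push() now takes the element (and frees it via
         * vring_free_element()) instead of the head index. */
        vring_push(vring, elem, len);
    }
}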

hw/virtio/dataplane/Makefile.objs

@@ -1 +1 @@
common-obj-y += hostmem.o vring.o
common-obj-y += vring.o

hw/virtio/dataplane/hostmem.c (deleted)

@@ -1,183 +0,0 @@
/*
* Thread-safe guest to host memory mapping
*
* Copyright 2012 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Stefan Hajnoczi <stefanha@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "exec/address-spaces.h"
#include "hw/virtio/dataplane/hostmem.h"
static int hostmem_lookup_cmp(const void *phys_, const void *region_)
{
hwaddr phys = *(const hwaddr *)phys_;
const HostMemRegion *region = region_;
if (phys < region->guest_addr) {
return -1;
} else if (phys >= region->guest_addr + region->size) {
return 1;
} else {
return 0;
}
}
/**
* Map guest physical address to host pointer
*/
void *hostmem_lookup(HostMem *hostmem, hwaddr phys, hwaddr len, bool is_write)
{
HostMemRegion *region;
void *host_addr = NULL;
hwaddr offset_within_region;
qemu_mutex_lock(&hostmem->current_regions_lock);
region = bsearch(&phys, hostmem->current_regions,
hostmem->num_current_regions,
sizeof(hostmem->current_regions[0]),
hostmem_lookup_cmp);
if (!region) {
goto out;
}
if (is_write && region->readonly) {
goto out;
}
offset_within_region = phys - region->guest_addr;
if (len <= region->size - offset_within_region) {
host_addr = region->host_addr + offset_within_region;
}
out:
qemu_mutex_unlock(&hostmem->current_regions_lock);
return host_addr;
}
/**
* Install new regions list
*/
static void hostmem_listener_commit(MemoryListener *listener)
{
HostMem *hostmem = container_of(listener, HostMem, listener);
int i;
qemu_mutex_lock(&hostmem->current_regions_lock);
for (i = 0; i < hostmem->num_current_regions; i++) {
memory_region_unref(hostmem->current_regions[i].mr);
}
g_free(hostmem->current_regions);
hostmem->current_regions = hostmem->new_regions;
hostmem->num_current_regions = hostmem->num_new_regions;
qemu_mutex_unlock(&hostmem->current_regions_lock);
/* Reset new regions list */
hostmem->new_regions = NULL;
hostmem->num_new_regions = 0;
}
/**
* Add a MemoryRegionSection to the new regions list
*/
static void hostmem_append_new_region(HostMem *hostmem,
MemoryRegionSection *section)
{
void *ram_ptr = memory_region_get_ram_ptr(section->mr);
size_t num = hostmem->num_new_regions;
size_t new_size = (num + 1) * sizeof(hostmem->new_regions[0]);
hostmem->new_regions = g_realloc(hostmem->new_regions, new_size);
hostmem->new_regions[num] = (HostMemRegion){
.host_addr = ram_ptr + section->offset_within_region,
.guest_addr = section->offset_within_address_space,
.size = int128_get64(section->size),
.readonly = section->readonly,
.mr = section->mr,
};
hostmem->num_new_regions++;
memory_region_ref(section->mr);
}
static void hostmem_listener_append_region(MemoryListener *listener,
MemoryRegionSection *section)
{
HostMem *hostmem = container_of(listener, HostMem, listener);
/* Ignore non-RAM regions, we may not be able to map them */
if (!memory_region_is_ram(section->mr)) {
return;
}
/* Ignore regions with dirty logging, we cannot mark them dirty */
if (memory_region_is_logging(section->mr)) {
return;
}
hostmem_append_new_region(hostmem, section);
}
/* We don't implement most MemoryListener callbacks, use these nop stubs */
static void hostmem_listener_dummy(MemoryListener *listener)
{
}
static void hostmem_listener_section_dummy(MemoryListener *listener,
MemoryRegionSection *section)
{
}
static void hostmem_listener_eventfd_dummy(MemoryListener *listener,
MemoryRegionSection *section,
bool match_data, uint64_t data,
EventNotifier *e)
{
}
static void hostmem_listener_coalesced_mmio_dummy(MemoryListener *listener,
MemoryRegionSection *section,
hwaddr addr, hwaddr len)
{
}
void hostmem_init(HostMem *hostmem)
{
memset(hostmem, 0, sizeof(*hostmem));
qemu_mutex_init(&hostmem->current_regions_lock);
hostmem->listener = (MemoryListener){
.begin = hostmem_listener_dummy,
.commit = hostmem_listener_commit,
.region_add = hostmem_listener_append_region,
.region_del = hostmem_listener_section_dummy,
.region_nop = hostmem_listener_append_region,
.log_start = hostmem_listener_section_dummy,
.log_stop = hostmem_listener_section_dummy,
.log_sync = hostmem_listener_section_dummy,
.log_global_start = hostmem_listener_dummy,
.log_global_stop = hostmem_listener_dummy,
.eventfd_add = hostmem_listener_eventfd_dummy,
.eventfd_del = hostmem_listener_eventfd_dummy,
.coalesced_mmio_add = hostmem_listener_coalesced_mmio_dummy,
.coalesced_mmio_del = hostmem_listener_coalesced_mmio_dummy,
.priority = 10,
};
memory_listener_register(&hostmem->listener, &address_space_memory);
if (hostmem->num_new_regions > 0) {
hostmem_listener_commit(&hostmem->listener);
}
}
void hostmem_finalize(HostMem *hostmem)
{
memory_listener_unregister(&hostmem->listener);
g_free(hostmem->new_regions);
g_free(hostmem->current_regions);
qemu_mutex_destroy(&hostmem->current_regions_lock);
}

hw/virtio/dataplane/vring.c

@@ -15,9 +15,53 @@
*/
#include "trace.h"
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "hw/virtio/dataplane/vring.h"
#include "qemu/error-report.h"
/* vring_map can be coupled with vring_unmap or (if you still have the
* value returned in *mr) memory_region_unref.
*/
static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
bool is_write)
{
MemoryRegionSection section = memory_region_find(get_system_memory(), phys, len);
if (!section.mr || int128_get64(section.size) < len) {
goto out;
}
if (is_write && section.readonly) {
goto out;
}
if (!memory_region_is_ram(section.mr)) {
goto out;
}
/* Ignore regions with dirty logging, we cannot mark them dirty */
if (memory_region_is_logging(section.mr)) {
goto out;
}
*mr = section.mr;
return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;
out:
memory_region_unref(section.mr);
*mr = NULL;
return NULL;
}
static void vring_unmap(void *buffer, bool is_write)
{
ram_addr_t addr;
MemoryRegion *mr;
mr = qemu_ram_addr_from_host(buffer, &addr);
memory_region_unref(mr);
}
/* Map the guest's vring to host memory */
bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
{
@@ -27,8 +71,7 @@ bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
vring->broken = false;
hostmem_init(&vring->hostmem);
vring_ptr = hostmem_lookup(&vring->hostmem, vring_addr, vring_size, true);
vring_ptr = vring_map(&vring->mr, vring_addr, vring_size, true);
if (!vring_ptr) {
error_report("Failed to map vring "
"addr %#" HWADDR_PRIx " size %" HWADDR_PRIu,
@@ -54,7 +97,7 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
virtio_queue_set_last_avail_idx(vdev, n, vring->last_avail_idx);
virtio_queue_invalidate_signalled_used(vdev, n);
hostmem_finalize(&vring->hostmem);
memory_region_unref(vring->mr);
}
/* Disable guest->host notifies */
@@ -110,14 +153,61 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
return vring_need_event(vring_used_event(&vring->vr), new, old);
}
static int get_desc(Vring *vring, VirtQueueElement *elem,
struct vring_desc *desc)
{
unsigned *num;
struct iovec *iov;
hwaddr *addr;
MemoryRegion *mr;
if (desc->flags & VRING_DESC_F_WRITE) {
num = &elem->in_num;
iov = &elem->in_sg[*num];
addr = &elem->in_addr[*num];
} else {
num = &elem->out_num;
iov = &elem->out_sg[*num];
addr = &elem->out_addr[*num];
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
if (unlikely(elem->in_num)) {
error_report("Descriptor has out after in");
return -EFAULT;
}
}
/* Stop for now if there are not enough iovecs available. */
if (*num >= VIRTQUEUE_MAX_SIZE) {
return -ENOBUFS;
}
/* TODO handle non-contiguous memory across region boundaries */
iov->iov_base = vring_map(&mr, desc->addr, desc->len,
desc->flags & VRING_DESC_F_WRITE);
if (!iov->iov_base) {
error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
(uint64_t)desc->addr, desc->len);
return -EFAULT;
}
/* The MemoryRegion is looked up again and unref'ed later, leave the
* ref in place. */
iov->iov_len = desc->len;
*addr = desc->addr;
*num += 1;
return 0;
}
/* This is stolen from linux/drivers/vhost/vhost.c. */
static int get_indirect(Vring *vring,
struct iovec iov[], struct iovec *iov_end,
unsigned int *out_num, unsigned int *in_num,
static int get_indirect(Vring *vring, VirtQueueElement *elem,
struct vring_desc *indirect)
{
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
int ret;
/* Sanity check */
if (unlikely(indirect->len % sizeof(desc))) {
@@ -139,11 +229,12 @@ static int get_indirect(Vring *vring,
do {
struct vring_desc *desc_ptr;
MemoryRegion *mr;
/* Translate indirect descriptor */
desc_ptr = hostmem_lookup(&vring->hostmem,
indirect->addr + found * sizeof(desc),
sizeof(desc), false);
desc_ptr = vring_map(&mr,
indirect->addr + found * sizeof(desc),
sizeof(desc), false);
if (!desc_ptr) {
error_report("Failed to map indirect descriptor "
"addr %#" PRIx64 " len %zu",
@@ -153,6 +244,7 @@ static int get_indirect(Vring *vring,
return -EFAULT;
}
desc = *desc_ptr;
memory_region_unref(mr);
/* Ensure descriptor has been loaded before accessing fields */
barrier(); /* read_barrier_depends(); */
@@ -170,42 +262,35 @@ static int get_indirect(Vring *vring,
return -EFAULT;
}
/* Stop for now if there are not enough iovecs available. */
if (iov >= iov_end) {
return -ENOBUFS;
}
iov->iov_base = hostmem_lookup(&vring->hostmem, desc.addr, desc.len,
desc.flags & VRING_DESC_F_WRITE);
if (!iov->iov_base) {
error_report("Failed to map indirect descriptor"
"addr %#" PRIx64 " len %u",
(uint64_t)desc.addr, desc.len);
vring->broken = true;
return -EFAULT;
}
iov->iov_len = desc.len;
iov++;
/* If this is an input descriptor, increment that count. */
if (desc.flags & VRING_DESC_F_WRITE) {
*in_num += 1;
} else {
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
if (unlikely(*in_num)) {
error_report("Indirect descriptor "
"has out after in: idx %u", i);
vring->broken = true;
return -EFAULT;
}
*out_num += 1;
ret = get_desc(vring, elem, &desc);
if (ret < 0) {
vring->broken |= (ret == -EFAULT);
return ret;
}
i = desc.next;
} while (desc.flags & VRING_DESC_F_NEXT);
return 0;
}
void vring_free_element(VirtQueueElement *elem)
{
int i;
/* This assumes that the iovecs, if changed, are never moved past
* the end of the valid area. This is true if iovec manipulations
* are done with iov_discard_front and iov_discard_back.
*/
for (i = 0; i < elem->out_num; i++) {
vring_unmap(elem->out_sg[i].iov_base, false);
}
for (i = 0; i < elem->in_num; i++) {
vring_unmap(elem->in_sg[i].iov_base, true);
}
g_slice_free(VirtQueueElement, elem);
}
/* This looks in the virtqueue and for the first available buffer, and converts
* it to an iovec for convenient access. Since descriptors consist of some
* number of output then some number of input descriptors, it's actually two
@@ -218,16 +303,18 @@ static int get_indirect(Vring *vring,
* Stolen from linux/drivers/vhost/vhost.c.
*/
int vring_pop(VirtIODevice *vdev, Vring *vring,
struct iovec iov[], struct iovec *iov_end,
unsigned int *out_num, unsigned int *in_num)
VirtQueueElement **p_elem)
{
struct vring_desc desc;
unsigned int i, head, found = 0, num = vring->vr.num;
uint16_t avail_idx, last_avail_idx;
VirtQueueElement *elem = NULL;
int ret;
/* If there was a fatal error then refuse operation */
if (vring->broken) {
return -EFAULT;
ret = -EFAULT;
goto out;
}
/* Check it isn't doing very strange things with descriptor numbers. */
@@ -238,13 +325,14 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
if (unlikely((uint16_t)(avail_idx - last_avail_idx) > num)) {
error_report("Guest moved used index from %u to %u",
last_avail_idx, avail_idx);
vring->broken = true;
return -EFAULT;
ret = -EFAULT;
goto out;
}
/* If there's nothing new since last we looked. */
if (avail_idx == last_avail_idx) {
return -EAGAIN;
ret = -EAGAIN;
goto out;
}
/* Only get avail ring entries after they have been exposed by guest. */
@@ -254,32 +342,33 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
* the index we've seen. */
head = vring->vr.avail->ring[last_avail_idx % num];
elem = g_slice_new(VirtQueueElement);
elem->index = head;
elem->in_num = elem->out_num = 0;
/* If their number is silly, that's an error. */
if (unlikely(head >= num)) {
error_report("Guest says index %u > %u is available", head, num);
vring->broken = true;
return -EFAULT;
ret = -EFAULT;
goto out;
}
if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
vring_avail_event(&vring->vr) = vring->vr.avail->idx;
}
/* When we start there are none of either input nor output. */
*out_num = *in_num = 0;
i = head;
do {
if (unlikely(i >= num)) {
error_report("Desc index is %u > %u, head = %u", i, num, head);
vring->broken = true;
return -EFAULT;
ret = -EFAULT;
goto out;
}
if (unlikely(++found > num)) {
error_report("Loop detected: last one at %u vq size %u head %u",
i, num, head);
vring->broken = true;
return -EFAULT;
ret = -EFAULT;
goto out;
}
desc = vring->vr.desc[i];
@@ -287,64 +376,50 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
barrier();
if (desc.flags & VRING_DESC_F_INDIRECT) {
int ret = get_indirect(vring, iov, iov_end, out_num, in_num, &desc);
int ret = get_indirect(vring, elem, &desc);
if (ret < 0) {
return ret;
goto out;
}
continue;
}
/* If there are not enough iovecs left, stop for now. The caller
* should check if there are more descs available once they have dealt
* with the current set.
*/
if (iov >= iov_end) {
return -ENOBUFS;
ret = get_desc(vring, elem, &desc);
if (ret < 0) {
goto out;
}
/* TODO handle non-contiguous memory across region boundaries */
iov->iov_base = hostmem_lookup(&vring->hostmem, desc.addr, desc.len,
desc.flags & VRING_DESC_F_WRITE);
if (!iov->iov_base) {
error_report("Failed to map vring desc addr %#" PRIx64 " len %u",
(uint64_t)desc.addr, desc.len);
vring->broken = true;
return -EFAULT;
}
iov->iov_len = desc.len;
iov++;
if (desc.flags & VRING_DESC_F_WRITE) {
/* If this is an input descriptor,
* increment that count. */
*in_num += 1;
} else {
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
if (unlikely(*in_num)) {
error_report("Descriptor has out after in: idx %d", i);
vring->broken = true;
return -EFAULT;
}
*out_num += 1;
}
i = desc.next;
} while (desc.flags & VRING_DESC_F_NEXT);
/* On success, increment avail index. */
vring->last_avail_idx++;
*p_elem = elem;
return head;
out:
assert(ret < 0);
if (ret == -EFAULT) {
vring->broken = true;
}
if (elem) {
vring_free_element(elem);
}
*p_elem = NULL;
return ret;
}
/* After we've used one of their buffers, we tell them about it.
*
* Stolen from linux/drivers/vhost/vhost.c.
*/
void vring_push(Vring *vring, unsigned int head, int len)
void vring_push(Vring *vring, VirtQueueElement *elem, int len)
{
struct vring_used_elem *used;
unsigned int head = elem->index;
uint16_t new;
vring_free_element(elem);
/* Don't touch vring if a fatal error occurred */
if (vring->broken) {
return;