Merge remote-tracking branch 'remotes/stefanha-gitlab/tags/block-pull-request' into staging

Pull request for 5.2

NVMe fixes to solve IOMMU issues on non-x86 and error message/tracing
improvements. Elena Afanasova's ioeventfd fixes are also included.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

# gpg: Signature made Wed 04 Nov 2020 15:18:16 GMT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha-gitlab/tags/block-pull-request: (33 commits)
  util/vfio-helpers: Assert offset is aligned to page size
  util/vfio-helpers: Convert vfio_dump_mapping to trace events
  util/vfio-helpers: Improve DMA trace events
  util/vfio-helpers: Trace where BARs are mapped
  util/vfio-helpers: Trace PCI BAR region info
  util/vfio-helpers: Trace PCI I/O config accesses
  util/vfio-helpers: Improve reporting unsupported IOMMU type
  block/nvme: Fix nvme_submit_command() on big-endian host
  block/nvme: Fix use of write-only doorbells page on Aarch64 arch
  block/nvme: Align iov's va and size on host page size
  block/nvme: Change size and alignment of prp_list_pages
  block/nvme: Change size and alignment of queue
  block/nvme: Change size and alignment of IDENTIFY response buffer
  block/nvme: Correct minimum device page size
  block/nvme: Set request_alignment at initialization
  block/nvme: Simplify nvme_cmd_sync()
  block/nvme: Simplify ADMIN queue access
  block/nvme: Correctly initialize Admin Queue Attributes
  block/nvme: Use definitions instead of magic values in add_io_queue()
  block/nvme: Introduce Completion Queue definitions
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 683685e72d
Peter Maydell, 2020-11-23 13:03:13 +00:00
8 changed files with 195 additions and 134 deletions
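
The tracing commits in this pull replace the QEMU_VFIO_DEBUG printf dumps with trace
points (the trace_qemu_vfio_*() calls visible in the diff below). Those trace points
are declared in util/trace-events, one of the files touched by this merge but not shown
in this excerpt. A minimal sketch of what such entries look like, with argument lists
and format strings given as illustrative assumptions rather than copies from the tree:

    # util/trace-events (sketch, not the actual hunk)
    qemu_vfio_dump_mapping(void *host, uint64_t iova, size_t size) "vfio mapping %p to iova 0x%"PRIx64" size 0x%zx"
    qemu_vfio_region_info(const char *desc, uint64_t offset, uint64_t size, uint32_t cap_offset) "region '%s' ofs 0x%"PRIx64" size 0x%"PRIx64" cap_ofs 0x%"PRIx32
    qemu_vfio_dma_mapped(void *s, void *host, uint64_t iova, size_t size) "s %p host %p <-> iova 0x%"PRIx64" size 0x%zx"

tracetool generates a trace_<name>() helper for each such entry, which is why the C
changes below only add calls and delete the old printf-based dump helpers.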

util/vfio-helpers.c

@@ -137,6 +137,7 @@ static inline void assert_bar_index_valid(QEMUVFIOState *s, int index)
 static int qemu_vfio_pci_init_bar(QEMUVFIOState *s, int index, Error **errp)
 {
+    g_autofree char *barname = NULL;
     assert_bar_index_valid(s, index);
     s->bar_region_info[index] = (struct vfio_region_info) {
         .index = VFIO_PCI_BAR0_REGION_INDEX + index,
@@ -146,6 +147,10 @@ static int qemu_vfio_pci_init_bar(QEMUVFIOState *s, int index, Error **errp)
         error_setg_errno(errp, errno, "Failed to get BAR region info");
         return -errno;
     }
+    barname = g_strdup_printf("bar[%d]", index);
+    trace_qemu_vfio_region_info(barname, s->bar_region_info[index].offset,
+                                s->bar_region_info[index].size,
+                                s->bar_region_info[index].cap_offset);
     return 0;
 }
@@ -158,10 +163,13 @@ void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
                             Error **errp)
 {
     void *p;
+    assert(QEMU_IS_ALIGNED(offset, qemu_real_host_page_size));
     assert_bar_index_valid(s, index);
     p = mmap(NULL, MIN(size, s->bar_region_info[index].size - offset),
              prot, MAP_SHARED,
              s->device, s->bar_region_info[index].offset + offset);
+    trace_qemu_vfio_pci_map_bar(index, s->bar_region_info[index].offset ,
+                                size, offset, p);
     if (p == MAP_FAILED) {
         error_setg_errno(errp, errno, "Failed to map BAR region");
         p = NULL;
@@ -228,6 +236,10 @@ static int qemu_vfio_pci_read_config(QEMUVFIOState *s, void *buf,
 {
     int ret;
+    trace_qemu_vfio_pci_read_config(buf, ofs, size,
+                                    s->config_region_info.offset,
+                                    s->config_region_info.size);
     assert(QEMU_IS_ALIGNED(s->config_region_info.offset + ofs, size));
     do {
         ret = pread(s->device, buf, size, s->config_region_info.offset + ofs);
     } while (ret == -1 && errno == EINTR);
@@ -238,6 +250,10 @@ static int qemu_vfio_pci_write_config(QEMUVFIOState *s, void *buf, int size, int
 {
     int ret;
+    trace_qemu_vfio_pci_write_config(buf, ofs, size,
+                                     s->config_region_info.offset,
+                                     s->config_region_info.size);
     assert(QEMU_IS_ALIGNED(s->config_region_info.offset + ofs, size));
     do {
         ret = pwrite(s->device, buf, size, s->config_region_info.offset + ofs);
     } while (ret == -1 && errno == EINTR);
@@ -301,7 +317,7 @@ static int qemu_vfio_init_pci(QEMUVFIOState *s, const char *device,
     }
     if (!ioctl(s->container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
-        error_setg_errno(errp, errno, "VFIO IOMMU check failed");
+        error_setg_errno(errp, errno, "VFIO IOMMU Type1 is not supported");
         ret = -EINVAL;
         goto fail_container;
     }
@@ -409,6 +425,9 @@ static int qemu_vfio_init_pci(QEMUVFIOState *s, const char *device,
         ret = -errno;
         goto fail;
     }
+    trace_qemu_vfio_region_info("config", s->config_region_info.offset,
+                                s->config_region_info.size,
+                                s->config_region_info.cap_offset);
     for (i = 0; i < ARRAY_SIZE(s->bar_region_info); i++) {
         ret = qemu_vfio_pci_init_bar(s, i, errp);
@@ -516,23 +535,12 @@ QEMUVFIOState *qemu_vfio_open_pci(const char *device, Error **errp)
     return s;
 }
-static void qemu_vfio_dump_mapping(IOVAMapping *m)
-{
-    if (QEMU_VFIO_DEBUG) {
-        printf(" vfio mapping %p %" PRIx64 " to %" PRIx64 "\n", m->host,
-               (uint64_t)m->size, (uint64_t)m->iova);
-    }
-}
 static void qemu_vfio_dump_mappings(QEMUVFIOState *s)
 {
-    int i;
-    if (QEMU_VFIO_DEBUG) {
-        printf("vfio mappings\n");
-        for (i = 0; i < s->nr_mappings; ++i) {
-            qemu_vfio_dump_mapping(&s->mappings[i]);
-        }
+    for (int i = 0; i < s->nr_mappings; ++i) {
+        trace_qemu_vfio_dump_mapping(s->mappings[i].host,
+                                     s->mappings[i].iova,
+                                     s->mappings[i].size);
     }
 }
@@ -622,7 +630,7 @@ static int qemu_vfio_do_mapping(QEMUVFIOState *s, void *host, size_t size,
         .vaddr = (uintptr_t)host,
         .size = size,
     };
-    trace_qemu_vfio_do_mapping(s, host, size, iova);
+    trace_qemu_vfio_do_mapping(s, host, iova, size);
     if (ioctl(s->container, VFIO_IOMMU_MAP_DMA, &dma_map)) {
         error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
@@ -778,6 +786,7 @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
             }
         }
     }
+    trace_qemu_vfio_dma_mapped(s, host, iova0, size);
     if (iova) {
         *iova = iova0;
     }