Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging

* kvm: enable dirty ring for arm64
* target/i386: new features
* target/i386: AVX fixes
* configure: create a python venv unconditionally
* meson: bump to 0.63.0 and move tests from configure
* meson: Pass -j option to sphinx
* drop support for Python 3.6
* fix check-python-tox
* fix "make clean" in the source directory

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmRmDYQUHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroOXSwf/WKmYPe09yHfxfVSFsSz83QpB3e+f
# KJx6FdyMMt26ZQJpcqorobrDV23R8FyxngXPkwoxqobAEtXB/AH0/S/u8RUZ46Qt
# IrF8FXr4ZdyLW7CW6nmIejmlul0iRmFD7D98E6dZ3QXfype3Ifra7gG74spZ1B44
# ZNvaomJKUK8Ga8rbChs9KtgrxlOC5q8IfTWF5ZExmZszPC9NRnZmU5Oncnuwek9T
# Ic6zDPoAeF3jDtovZhxg1HAB9e/ENZX/V9NjO92yZa8u/TITQ88l4tJctf7uiLxO
# 2oGY12ln8i//pbjyUe4iM+bNh5+reAChEI8iv7WxEsj9s2HBUJ68f3tpbQ==
# =Zg00
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 18 May 2023 04:35:32 AM PDT
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [undefined]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu: (68 commits)
  docs/devel: update build system docs
  configure: remove unnecessary check
  configure: reorder option parsing code
  configure: remove unnecessary mkdir
  configure: do not rerun the tests with -Werror
  configure: remove compiler sanity check
  build: move --disable-debug-info to meson
  build: move compiler version check to meson
  build: move remaining compiler flag tests to meson
  build: move warning flag selection to meson
  build: move stack protector flag selection to meson
  build: move coroutine backend selection to meson
  build: move SafeStack tests to meson
  build: move sanitizer tests to meson
  meson: prepare move of QEMU_CFLAGS to meson
  configure, meson: move --enable-modules to Meson
  configure: remove pkg-config functions
  build: move glib detection and workarounds to meson
  meson: drop unnecessary declare_dependency()
  meson: add more version numbers to the summary
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2023-05-18 07:52:11 -07:00
commit f0b95ab6b8
62 changed files with 2165 additions and 1352 deletions
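
The headline feature, the KVM dirty ring on arm64, is still opt-in at run time. A minimal sketch of enabling it, assuming the existing dirty-ring-size property of the KVM accelerator (the value is a number of ring entries; the patches suggest at least 1024):

    # illustrative command line, not taken from this pull request
    qemu-system-aarch64 -M virt -cpu host \
        -accel kvm,dirty-ring-size=4096 \
        -m 4G -nographic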

@@ -12,12 +12,12 @@
   - mkdir build
   - cd build
   - ../configure --enable-werror --disable-docs --enable-fdt=system
-      ${LD_JOBS:+--meson=git} ${TARGETS:+--target-list="$TARGETS"}
+      ${TARGETS:+--target-list="$TARGETS"}
       $CONFIGURE_ARGS ||
       { cat config.log meson-logs/meson-log.txt && exit 1; }
   - if test -n "$LD_JOBS";
     then
-      ../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
+      pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
     fi || exit 1;
   - make -j"$JOBS"
   - if test -n "$MAKE_CHECK_ARGS";

@@ -103,7 +103,7 @@ crash-test-debian:
   script:
     - cd build
     - make NINJA=":" check-venv
-    - tests/venv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
+    - pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386

 build-system-fedora:
   extends:
@@ -146,8 +146,8 @@ crash-test-fedora:
   script:
     - cd build
     - make NINJA=":" check-venv
-    - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
-    - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
+    - pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
+    - pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32

 build-system-centos:
   extends:

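The same crash test that the CI jobs above run can be reproduced from a local build tree; a sketch based on the script lines in the hunk (paths assume the new pyvenv layout):

    cd build
    make NINJA=":" check-venv
    pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
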
.gitmodules (3 changed lines)

@@ -49,9 +49,6 @@
 [submodule "roms/qboot"]
 	path = roms/qboot
 	url = https://gitlab.com/qemu-project/qboot.git
-[submodule "meson"]
-	path = meson
-	url = https://gitlab.com/qemu-project/meson.git
 [submodule "roms/vbootrom"]
 	path = roms/vbootrom
 	url = https://gitlab.com/qemu-project/vbootrom.git

@@ -26,7 +26,7 @@ quiet-command-run = $(if $(V),,$(if $2,printf "  %-7s %s\n" $2 $3 && ))$1
 quiet-@ = $(if $(V),,@)
 quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)

-UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
+UNCHECKED_GOALS := TAGS gtags cscope ctags dist \
     help check-help print-% \
     docker docker-% vm-help vm-test vm-build-%
@@ -176,10 +176,8 @@ plugins:
 endif # $(CONFIG_PLUGIN)

 else # config-host.mak does not exist
-config-host.mak:
 ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
-	@echo "Please call configure before running make!"
-	@exit 1
+$(error Please call configure before running make)
 endif
 endif # config-host.mak does not exist

@@ -1361,6 +1361,10 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
          */
         if (kvm_state->kvm_dirty_ring_size) {
             kvm_dirty_ring_reap_locked(kvm_state, NULL);
+            if (kvm_state->kvm_dirty_ring_with_bitmap) {
+                kvm_slot_sync_dirty_pages(mem);
+                kvm_slot_get_dirty_log(kvm_state, mem);
+            }
         } else {
             kvm_slot_get_dirty_log(kvm_state, mem);
         }
@@ -1458,6 +1462,69 @@ static int kvm_dirty_ring_reaper_init(KVMState *s)
     return 0;
 }

+static int kvm_dirty_ring_init(KVMState *s)
+{
+    uint32_t ring_size = s->kvm_dirty_ring_size;
+    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
+    unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
+    int ret;
+
+    s->kvm_dirty_ring_size = 0;
+    s->kvm_dirty_ring_bytes = 0;
+
+    /* Bail if the dirty ring size isn't specified */
+    if (!ring_size) {
+        return 0;
+    }
+
+    /*
+     * Read the max supported pages. Fall back to dirty logging mode
+     * if the dirty ring isn't supported.
+     */
+    ret = kvm_vm_check_extension(s, capability);
+    if (ret <= 0) {
+        capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
+        ret = kvm_vm_check_extension(s, capability);
+    }
+
+    if (ret <= 0) {
+        warn_report("KVM dirty ring not available, using bitmap method");
+        return 0;
+    }
+
+    if (ring_bytes > ret) {
+        error_report("KVM dirty ring size %" PRIu32 " too big "
+                     "(maximum is %ld).  Please use a smaller value.",
+                     ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
+        return -EINVAL;
+    }
+
+    ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
+    if (ret) {
+        error_report("Enabling of KVM dirty ring failed: %s. "
+                     "Suggested minimum value is 1024.", strerror(-ret));
+        return -EIO;
+    }
+
+    /* Enable the backup bitmap if it is supported */
+    ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
+    if (ret > 0) {
+        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
+        if (ret) {
+            error_report("Enabling of KVM dirty ring's backup bitmap failed: "
+                         "%s. ", strerror(-ret));
+            return -EIO;
+        }
+        s->kvm_dirty_ring_with_bitmap = true;
+    }
+
+    s->kvm_dirty_ring_size = ring_size;
+    s->kvm_dirty_ring_bytes = ring_bytes;
+
+    return 0;
+}
+
 static void kvm_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
 {
@@ -1563,7 +1630,7 @@ static void kvm_log_sync(MemoryListener *listener,
     kvm_slots_unlock();
 }

-static void kvm_log_sync_global(MemoryListener *l)
+static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
 {
     KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
     KVMState *s = kvm_state;
@@ -1582,6 +1649,12 @@ static void kvm_log_sync_global(MemoryListener *l)
         mem = &kml->slots[i];
         if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
             kvm_slot_sync_dirty_pages(mem);
+
+            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
+                kvm_slot_get_dirty_log(s, mem)) {
+                kvm_slot_sync_dirty_pages(mem);
+            }
+
             /*
              * This is not needed by KVM_GET_DIRTY_LOG because the
              * ioctl will unconditionally overwrite the whole region.
@@ -2521,35 +2594,9 @@ static int kvm_init(MachineState *ms)
      * Enable KVM dirty ring if supported, otherwise fall back to
      * dirty logging mode
      */
-    if (s->kvm_dirty_ring_size > 0) {
-        uint64_t ring_bytes;
-
-        ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
-
-        /* Read the max supported pages */
-        ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
-        if (ret > 0) {
-            if (ring_bytes > ret) {
-                error_report("KVM dirty ring size %" PRIu32 " too big "
-                             "(maximum is %ld).  Please use a smaller value.",
-                             s->kvm_dirty_ring_size,
-                             (long)ret / sizeof(struct kvm_dirty_gfn));
-                ret = -EINVAL;
-                goto err;
-            }
-
-            ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
-            if (ret) {
-                error_report("Enabling of KVM dirty ring failed: %s. "
-                             "Suggested minimum value is 1024.", strerror(-ret));
-                goto err;
-            }
-
-            s->kvm_dirty_ring_bytes = ring_bytes;
-        } else {
-            warn_report("KVM dirty ring not available, using bitmap method");
-            s->kvm_dirty_ring_size = 0;
-        }
-    }
+    ret = kvm_dirty_ring_init(s);
+    if (ret < 0) {
+        goto err;
+    }

     /*
@@ -3710,6 +3757,7 @@ static void kvm_accel_instance_init(Object *obj)
     s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
     /* KVM dirty ring is by default off */
     s->kvm_dirty_ring_size = 0;
+    s->kvm_dirty_ring_with_bitmap = false;
     s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
     s->notify_window = 0;
     s->xen_version = 0;

@@ -72,11 +72,13 @@ static void rr_kick_next_cpu(void)
 {
     CPUState *cpu;
     do {
-        cpu = qatomic_mb_read(&rr_current_cpu);
+        cpu = qatomic_read(&rr_current_cpu);
         if (cpu) {
             cpu_exit(cpu);
         }
-    } while (cpu != qatomic_mb_read(&rr_current_cpu));
+        /* Finish kicking this cpu before reading again. */
+        smp_mb();
+    } while (cpu != qatomic_read(&rr_current_cpu));
 }

 static void rr_kick_thread(void *opaque)
@@ -241,8 +243,9 @@ static void *rr_cpu_thread_fn(void *arg)
         }

         while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
+            /* Store rr_current_cpu before evaluating cpu_can_run(). */
             qatomic_mb_set(&rr_current_cpu, cpu);
             current_cpu = cpu;
             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -280,7 +283,7 @@ static void *rr_cpu_thread_fn(void *arg)
             cpu = CPU_NEXT(cpu);
         } /* while (cpu && !cpu->exit_request).. */

-        /* Does not need qatomic_mb_set because a spurious wakeup is okay. */
+        /* Does not need a memory barrier because a spurious wakeup is okay. */
         qatomic_set(&rr_current_cpu, NULL);

         if (cpu && cpu->exit_request) {

configure (980 changed lines): file diff suppressed because it is too large.

@@ -3,7 +3,7 @@
 # This Makefile example is fairly independent from the main makefile
 # so users can take and adapt it for their build. We only really
 # include config-host.mak so we don't have to repeat probing for
-# cflags that the main configure has already done for us.
+# programs that the main configure has already done for us.
 #
 BUILD_DIR := $(CURDIR)/../..
@@ -26,9 +26,8 @@ SONAMES := $(addsuffix .so,$(addprefix lib,$(NAMES)))
 # The main QEMU uses Glib extensively so it's perfectly fine to use it
 # in plugins (which many example do).
-CFLAGS = $(GLIB_CFLAGS)
-CFLAGS += -fPIC -Wall $(filter -W%, $(QEMU_CFLAGS))
-CFLAGS += $(if $(findstring no-psabi,$(QEMU_CFLAGS)),-Wpsabi)
+CFLAGS := $(shell $(PKG_CONFIG) --cflags glib-2.0)
+CFLAGS += -fPIC -Wall
 CFLAGS += $(if $(CONFIG_DEBUG_TCG), -ggdb -O0)
 CFLAGS += -I$(SRC_PATH)/include/qemu

@@ -98,7 +98,7 @@ Python runtime
   option of the ``configure`` script to point QEMU to a supported
   version of the Python runtime.

-  As of QEMU |version|, the minimum supported version of Python is 3.6.
+  As of QEMU |version|, the minimum supported version of Python is 3.7.

 Python build dependencies
   Some of QEMU's build dependencies are written in Python.  Usually these

@@ -32,15 +32,6 @@ import sphinx
 from distutils.version import LooseVersion
 from sphinx.errors import ConfigError

-# Make Sphinx fail cleanly if using an old Python, rather than obscurely
-# failing because some code in one of our extensions doesn't work there.
-# In newer versions of Sphinx this will display nicely; in older versions
-# Sphinx will also produce a Python backtrace but at least the information
-# gets printed...
-if sys.version_info < (3,6):
-    raise ConfigError(
-        "QEMU requires a Sphinx that uses Python 3.6 or better\n")
-
 # The per-manual conf.py will set qemu_docdir for a single-manual build;
 # otherwise set it here if this is an entire-manual-set build.
 # This is always the absolute path of the docs/ directory in the source tree.

@@ -61,19 +61,19 @@ Under ``tests/avocado/`` as the root we have:
 ::

   $ make check-venv (needed only the first time to create the venv)
-  $ ./tests/venv/bin/avocado run -t acpi tests/avocado
+  $ ./pyvenv/bin/avocado run -t acpi tests/avocado

 The above will run all acpi avocado tests including this one.
 In order to run the individual tests, perform the following:

 ::

-  $ ./tests/venv/bin/avocado run tests/avocado/acpi-bits.py --tap -
+  $ ./pyvenv/bin/avocado run tests/avocado/acpi-bits.py --tap -

 The above will produce output in tap format. You can omit "--tap -" in the
 end and it will produce output like the following:

 ::

-  $ ./tests/venv/bin/avocado run tests/avocado/acpi-bits.py
+  $ ./pyvenv/bin/avocado run tests/avocado/acpi-bits.py
     Fetching asset from tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits
     JOB ID     : eab225724da7b64c012c65705dc2fa14ab1defef
     JOB LOG    : /home/anisinha/avocado/job-results/job-2022-10-10T17.58-eab2257/job.log


@ -4,30 +4,14 @@ The QEMU build system architecture
This document aims to help developers understand the architecture of the This document aims to help developers understand the architecture of the
QEMU build system. As with projects using GNU autotools, the QEMU build QEMU build system. As with projects using GNU autotools, the QEMU build
system has two stages, first the developer runs the "configure" script system has two stages; first the developer runs the "configure" script
to determine the local build environment characteristics, then they run to determine the local build environment characteristics, then they run
"make" to build the project. There is about where the similarities with "make" to build the project. This is about where the similarities with
GNU autotools end, so try to forget what you know about them. GNU autotools end, so try to forget what you know about them.
The two general ways to perform a build are as follows:
Stage 1: configure - build artifacts outside of QEMU source tree entirely::
==================
The QEMU configure script is written directly in shell, and should be
compatible with any POSIX shell, hence it uses #!/bin/sh. An important
implication of this is that it is important to avoid using bash-isms on
development platforms where bash is the primary host.
In contrast to autoconf scripts, QEMU's configure is expected to be
silent while it is checking for features. It will only display output
when an error occurs, or to show the final feature enablement summary
on completion.
Because QEMU uses the Meson build system under the hood, only VPATH
builds are supported. There are two general ways to invoke configure &
perform a build:
- VPATH, build artifacts outside of QEMU source tree entirely::
cd ../ cd ../
mkdir build mkdir build
@ -35,88 +19,122 @@ perform a build:
../qemu/configure ../qemu/configure
make make
- VPATH, build artifacts in a subdir of QEMU source tree:: - build artifacts in a subdir of QEMU source tree::
mkdir build mkdir build
cd build cd build
../configure ../configure
make make
The configure script automatically recognizes Most of the actual build process uses Meson under the hood, therefore
command line options for which a same-named Meson option exists; build artifacts cannot be placed in the source tree itself.
dashes in the command line are replaced with underscores.
Many checks on the compilation environment are still found in configure
rather than ``meson.build``, but new checks should be added directly to
``meson.build``.
Patches are also welcome to move existing checks from the configure Stage 1: configure
phase to ``meson.build``. When doing so, ensure that ``meson.build`` does ==================
not use anymore the keys that you have removed from ``config-host.mak``.
Typically these will be replaced in ``meson.build`` by boolean variables,
``get_option('optname')`` invocations, or ``dep.found()`` expressions.
In general, the remaining checks have little or no interdependencies,
so they can be moved one by one.
Helper functions The configure script has five tasks:
----------------
The configure script provides a variety of helper functions to assist - detect the host architecture
developers in checking for system features:
``do_cc $ARGS...`` - list the targets for which to build emulators; the list of
Attempt to run the system C compiler passing it $ARGS... targets also affects which firmware binaries and tests to build
``do_cxx $ARGS...`` - find the compilers (native and cross) used to build executables,
Attempt to run the system C++ compiler passing it $ARGS... firmware and tests. The results are written as either Makefile
fragments (``config-host.mak``) or a Meson machine file
(``config-meson.cross``)
``compile_object $CFLAGS`` - create a virtual environment in which all Python code runs during
Attempt to compile a test program with the system C compiler using the build, and possibly install packages into it from PyPI
$CFLAGS. The test program must have been previously written to a file
called $TMPC. The replacement in Meson is the compiler object ``cc``,
which has methods such as ``cc.compiles()``,
``cc.check_header()``, ``cc.has_function()``.
``compile_prog $CFLAGS $LDFLAGS`` - invoke Meson in the virtual environment, to perform the actual
Attempt to compile a test program with the system C compiler using configuration step for the emulator build
$CFLAGS and link it with the system linker using $LDFLAGS. The test
program must have been previously written to a file called $TMPC. The configure script automatically recognizes command line options for
The replacement in Meson is ``cc.find_library()`` and ``cc.links()``. which a same-named Meson option exists; dashes in the command line are
replaced with underscores.
Almost all QEMU developers that need to modify the build system will
only be concerned with Meson, and therefore can skip the rest of this
section.
Modifying ``configure``
-----------------------
``configure`` is a shell script; it uses ``#!/bin/sh`` and therefore
should be compatible with any POSIX shell. It is important to avoid
using bash-isms to avoid breaking development platforms where bash is
the primary host.
The configure script provides a variety of functions to help writing
portable shell code and providing consistent behavior across architectures
and operating systems:
``error_exit $MESSAGE $MORE...``
Print $MESSAGE to stderr, followed by $MORE... and then exit from the
configure script with non-zero status.
``has $COMMAND`` ``has $COMMAND``
Determine if $COMMAND exists in the current environment, either as a Determine if $COMMAND exists in the current environment, either as a
shell builtin, or executable binary, returning 0 on success. The shell builtin, or executable binary, returning 0 on success. The
replacement in Meson is ``find_program()``. replacement in Meson is ``find_program()``.
``check_define $NAME`` ``probe_target_compiler $TARGET``
Determine if the macro $NAME is defined by the system C compiler Detect a cross compiler and cross tools for the QEMU target $TARGET (e.g.,
``$CPU-softmmu``, ``$CPU-linux-user``, ``$CPU-bsd-user``). If a working
compiler is present, return success and set variables ``$target_cc``,
``$target_ar``, etc. to non-empty values.
``check_include $NAME`` ``write_target_makefile``
Determine if the include $NAME file is available to the system C Write a Makefile fragment to stdout, exposing the result of the most
compiler. The replacement in Meson is ``cc.has_header()``. ``probe_target_compiler`` call as the usual Make variables (``CC``,
``AR``, ``LD``, etc.).
Configure does not generally perform tests for compiler options beyond
basic checks to detect the host platform and ensure the compiler is
functioning. These are performed using a few more helper functions:
``compile_object $CFLAGS``
Attempt to compile a test program with the system C compiler using
$CFLAGS. The test program must have been previously written to a file
called $TMPC.
``compile_prog $CFLAGS $LDFLAGS``
Attempt to compile a test program with the system C compiler using
$CFLAGS and link it with the system linker using $LDFLAGS. The test
program must have been previously written to a file called $TMPC.
``check_define $NAME``
Determine if the macro $NAME is defined by the system C compiler.
``do_compiler $CC $ARGS...``
Attempt to run the C compiler $CC, passing it $ARGS... This function
does not use flags passed via options such as ``--extra-cflags``, and
therefore can be used to check for cross compilers. However, most
such checks are done at ``make`` time instead (see for example the
``cc-option`` macro in ``pc-bios/option-rom/Makefile``).
``write_c_skeleton`` ``write_c_skeleton``
Write a minimal C program main() function to the temporary file Write a minimal C program main() function to the temporary file
indicated by $TMPC indicated by $TMPC.
``error_exit $MESSAGE $MORE...``
Print $MESSAGE to stderr, followed by $MORE... and then exit from the
configure script with non-zero status
``query_pkg_config $ARGS...`` Python virtual environments and the QEMU build system
Run pkg-config passing it $ARGS. If QEMU is doing a static build, -----------------------------------------------------
then --static will be automatically added to $ARGS
TBD
Stage 2: Meson Stage 2: Meson
============== ==============
The Meson build system is currently used to describe the build The Meson build system describes the build and install process for:
process for:
1) executables, which include: 1) executables, which include:
- Tools - ``qemu-img``, ``qemu-nbd``, ``qga`` (guest agent), etc - Tools - ``qemu-img``, ``qemu-nbd``, ``qemu-ga`` (guest agent), etc
- System emulators - ``qemu-system-$ARCH`` - System emulators - ``qemu-system-$ARCH``
@ -126,7 +144,8 @@ process for:
2) documentation 2) documentation
3) ROMs, which can be either installed as binary blobs or compiled 3) ROMs, whether provided as binary blobs in the QEMU distributions
or cross compiled under the direction of the configure script
4) other data files, such as icons or desktop files 4) other data files, such as icons or desktop files
@ -281,8 +300,7 @@ system/userspace emulation target
Adding checks Adding checks
------------- -------------
New checks should be added to Meson. Compiler checks can be as simple as Compiler checks can be as simple as the following::
the following::
config_host_data.set('HAVE_BTRFS_H', cc.has_header('linux/btrfs.h')) config_host_data.set('HAVE_BTRFS_H', cc.has_header('linux/btrfs.h'))
@ -311,8 +329,7 @@ dependency will be used::
sdl_image = not_found sdl_image = not_found
if not get_option('sdl_image').auto() or have_system if not get_option('sdl_image').auto() or have_system
sdl_image = dependency('SDL2_image', required: get_option('sdl_image'), sdl_image = dependency('SDL2_image', required: get_option('sdl_image'),
method: 'pkg-config', method: 'pkg-config')
static: enable_static)
endif endif
This avoids warnings on static builds of user-mode emulators, for example. This avoids warnings on static builds of user-mode emulators, for example.
@ -360,22 +377,30 @@ script, which may point to something other than the first python3
binary on the path. binary on the path.
Stage 3: makefiles Stage 3: Make
================== =============
The use of GNU make is required with the QEMU build system. The next step in building QEMU is to invoke make. GNU Make is required
to build QEMU, and may be installed as ``gmake`` on some hosts.
The output of Meson is a build.ninja file, which is used with the Ninja The output of Meson is a ``build.ninja`` file, which is used with the
build system. QEMU uses a different approach, where Makefile rules are Ninja build tool. However, QEMU's build comprises other components than
synthesized from the build.ninja file. The main Makefile includes these just the emulators (namely firmware and the tests in ``tests/tcg``) which
rules and wraps them so that e.g. submodules are built before QEMU. need different cross compilers. The QEMU Makefile wraps both Ninja and
The resulting build system is largely non-recursive in nature, in the smaller build systems for firmware and tests; it also takes care of
contrast to common practices seen with automake. running ``configure`` again when the script changes. Apart from invoking
these sub-Makefiles, the resulting build is largely non-recursive.
Tests are also ran by the Makefile with the traditional ``make check`` Tests, whether defined in ``meson.build`` or not, are also ran by the
phony target, while benchmarks are run with ``make bench``. Meson test Makefile with the traditional ``make check`` phony target, while benchmarks
suites such as ``unit`` can be ran with ``make check-unit`` too. It is also are run with ``make bench``. Meson test suites such as ``unit`` can be ran
possible to run tests defined in meson.build with ``meson test``. with ``make check-unit``, and ``make check-tcg`` builds and runs "non-Meson"
tests for all targets.
If desired, it is also possible to use ``ninja`` and ``meson test``,
respectively to build emulators and run tests defined in meson.build.
The main difference is that ``make`` needs the ``-jN`` flag in order to
enable parallel builds or tests.
Useful make targets Useful make targets
------------------- -------------------
@ -387,6 +412,7 @@ Useful make targets
Print the value of the variable VAR. Useful for debugging the build Print the value of the variable VAR. Useful for debugging the build
system. system.
Important files for the build system Important files for the build system
==================================== ====================================
@ -400,8 +426,7 @@ number of dynamically created files listed later.
``Makefile`` ``Makefile``
The main entry point used when invoking make to build all the components The main entry point used when invoking make to build all the components
of QEMU. The default 'all' target will naturally result in the build of of QEMU. The default 'all' target will naturally result in the build of
every component. Makefile takes care of recursively building submodules every component.
directly via a non-recursive set of rules.
``*/meson.build`` ``*/meson.build``
The meson.build file in the root directory is the main entry point for the The meson.build file in the root directory is the main entry point for the
@ -410,59 +435,92 @@ number of dynamically created files listed later.
other meson.build files spread throughout the QEMU source tree. other meson.build files spread throughout the QEMU source tree.
``tests/Makefile.include`` ``tests/Makefile.include``
Rules for external test harnesses. These include the TCG tests, Rules for external test harnesses. These include the TCG tests
``qemu-iotests`` and the Avocado-based integration tests. and the Avocado-based integration tests.
``tests/docker/Makefile.include`` ``tests/docker/Makefile.include``
Rules for Docker tests. Like tests/Makefile, this file is included Rules for Docker tests. Like ``tests/Makefile.include``, this file is
directly by the top level Makefile, anything defined in this file will included directly by the top level Makefile, anything defined in this
influence the entire build system. file will influence the entire build system.
``tests/vm/Makefile.include`` ``tests/vm/Makefile.include``
Rules for VM-based tests. Like tests/Makefile, this file is included Rules for VM-based tests. Like ``tests/Makefile.include``, this file is
directly by the top level Makefile, anything defined in this file will included directly by the top level Makefile, anything defined in this
influence the entire build system. file will influence the entire build system.
Dynamically created files Dynamically created files
------------------------- -------------------------
The following files are generated dynamically by configure in order to The following files are generated at run-time in order to control the
control the behaviour of the statically defined makefiles. This avoids behaviour of the Makefiles. This avoids the need for QEMU makefiles to
the need for QEMU makefiles to go through any pre-processing as seen go through any pre-processing as seen with autotools, where configure
with autotools, where Makefile.am generates Makefile.in which generates generates ``Makefile`` from ``Makefile.in``.
Makefile.
Built by configure: Built by configure:
``config-host.mak`` ``config-host.mak``
When configure has determined the characteristics of the build host it When configure has determined the characteristics of the build host it
will write a long list of variables to config-host.mak file. This will write them to this file for use in ``Makefile`` and to a smaller
provides the various install directories, compiler / linker flags and a extent ``meson.build``. These include the paths to various tools and a
variety of ``CONFIG_*`` variables related to optionally enabled features. variety of ``CONFIG_*`` variables related to optionally enabled features.
This is imported by the top level Makefile and meson.build in order to
tailor the build output.
config-host.mak is also used as a dependency checking mechanism. If make ``config-host.mak`` is also used as a dependency checking mechanism. If make
sees that the modification timestamp on configure is newer than that on sees that the modification timestamp on configure is newer than that on
config-host.mak, then configure will be re-run. ``config-host.mak``, then configure will be re-run.
The variables defined here are those which are applicable to all QEMU The variables defined here apply to all QEMU
build outputs. Variables which are potentially different for each build outputs.
emulator target are defined by the next file...
``config-meson.cross``
A Meson "cross file" (or native file) used to communicate the paths to
the toolchain and other configuration options.
``config.status``
A small shell script that will invoke configure again with the same
environment variables that were set during the first run. It's used to
rerun configure after changes to the source code, but it can also be
inspected manually to check the contents of the environment.
``Makefile.prereqs``
A set of Makefile dependencies that order the build and execution of
firmware and tests after the container images and emulators that they
need.
``pc-bios/*/config.mak``, ``tests/tcg/config-host.mak``, ``tests/tcg/*/config-target.mak``
Configuration variables used to build the firmware and TCG tests,
including paths to cross compilation toolchains.
``pyvenv``
A Python virtual environment that is used for all Python code running
during the build. Using a virtual environment ensures that even code
that is run via ``sphinx-build``, ``meson`` etc. uses the same interpreter
and packages.
Built by Meson: Built by Meson:
``${TARGET-NAME}-config-devices.mak`` ``config-host.h``
TARGET-NAME is again the name of a system or userspace emulator. The Used by C code to determine the properties of the build environment
config-devices.mak file is automatically generated by make using the and the set of enabled features for the entire build.
scripts/make_device_config.sh program, feeding it the
default-configs/$TARGET-NAME file as input.
``config-host.h``, ``$TARGET_NAME-config-target.h``, ``$TARGET_NAME-config-devices.h`` ``${TARGET-NAME}-config-devices.mak``
These files are used by source code to determine what features are TARGET-NAME is the name of a system emulator. The file is
enabled. They are generated from the contents of the corresponding generated by Meson using files under ``configs/devices`` as input.
``*.mak`` files using Meson's ``configure_file()`` function.
``${TARGET-NAME}-config-target.mak``
TARGET-NAME is the name of a system or usermode emulator. The file is
generated by Meson using files under ``configs/targets`` as input.
``$TARGET_NAME-config-target.h``, ``$TARGET_NAME-config-devices.h``
Used by C code to determine the properties and enabled
features for each target. enabled. They are generated from
the contents of the corresponding ``*.mak`` files using Meson's
``configure_file()`` function; each target can include them using
the ``CONFIG_TARGET`` and ``CONFIG_DEVICES`` macro respectively.
``build.ninja`` ``build.ninja``
The build rules. The build rules.
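
Putting the documented stages together, a typical out-of-tree build under the new layout might look like the following sketch (flags and the Meson option are illustrative; the pyvenv and config.status behaviour are as described above):

    mkdir build && cd build
    ../configure --target-list=x86_64-softmmu   # stage 1: writes config-host.mak, creates pyvenv, runs meson
    make -j"$(nproc)"                           # stage 3: wraps ninja plus firmware/test sub-makefiles
    ./config.status                             # rerun configure with the originally given options
    pyvenv/bin/meson configure . -Dwerror=true  # tweak a Meson option of an existing build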


@ -888,9 +888,9 @@ You can run the avocado tests simply by executing:
make check-avocado make check-avocado
This involves the automatic creation of Python virtual environment This involves the automatic installation, from PyPI, of all the
within the build tree (at ``tests/venv``) which will have all the necessary avocado-framework dependencies into the QEMU venv within the
right dependencies, and will save tests results also within the build tree (at ``./pyvenv``). Test results are also saved within the
build tree (at ``tests/results``). build tree (at ``tests/results``).
Note: the build environment must be using a Python 3 stack, and have Note: the build environment must be using a Python 3 stack, and have
@ -947,7 +947,7 @@ may be invoked by running:
.. code:: .. code::
tests/venv/bin/avocado run $OPTION1 $OPTION2 tests/avocado/ pyvenv/bin/avocado run $OPTION1 $OPTION2 tests/avocado/
Note that if ``make check-avocado`` was not executed before, it is Note that if ``make check-avocado`` was not executed before, it is
possible to create the Python virtual environment with the dependencies possible to create the Python virtual environment with the dependencies
@ -962,20 +962,20 @@ a test file. To run tests from a single file within the build tree, use:
.. code:: .. code::
tests/venv/bin/avocado run tests/avocado/$TESTFILE pyvenv/bin/avocado run tests/avocado/$TESTFILE
To run a single test within a test file, use: To run a single test within a test file, use:
.. code:: .. code::
tests/venv/bin/avocado run tests/avocado/$TESTFILE:$TESTCLASS.$TESTNAME pyvenv/bin/avocado run tests/avocado/$TESTFILE:$TESTCLASS.$TESTNAME
Valid test names are visible in the output from any previous execution Valid test names are visible in the output from any previous execution
of Avocado or ``make check-avocado``, and can also be queried using: of Avocado or ``make check-avocado``, and can also be queried using:
.. code:: .. code::
tests/venv/bin/avocado list tests/avocado pyvenv/bin/avocado list tests/avocado
Manual Installation Manual Installation
~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~


@ -1,4 +1,4 @@
sphinx_build = find_program(get_option('sphinx_build'), sphinx_build = find_program(fs.parent(python.full_path()) / 'sphinx-build',
required: get_option('docs')) required: get_option('docs'))
# Check if tools are available to build documentation. # Check if tools are available to build documentation.
@ -10,6 +10,18 @@ if sphinx_build.found()
SPHINX_ARGS += [ '-W', '-Dkerneldoc_werror=1' ] SPHINX_ARGS += [ '-W', '-Dkerneldoc_werror=1' ]
endif endif
sphinx_version = run_command(SPHINX_ARGS + ['--version'],
check: true).stdout().split()[1]
if sphinx_version.version_compare('>=1.7.0')
SPHINX_ARGS += ['-j', 'auto']
else
nproc = find_program('nproc')
if nproc.found()
jobs = run_command(nproc, check: true).stdout()
SPHINX_ARGS += ['-j', jobs]
endif
endif
# This is a bit awkward but works: create a trivial document and # This is a bit awkward but works: create a trivial document and
# try to run it with our configuration file (which enforces a # try to run it with our configuration file (which enforces a
# version requirement). This will fail if sphinx-build is too old. # version requirement). This will fail if sphinx-build is too old.
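
The net effect of the hunk above is that Sphinx runs with one job per CPU when it is new enough. A rough hand-run equivalent (paths and output directory are illustrative; -j auto needs Sphinx 1.7 or newer):

    pyvenv/bin/sphinx-build -j auto -b html ../docs docs/manual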

@@ -400,6 +400,10 @@ class DBusDomain(Domain):
         for refname, obj in self.objects.items():
             yield (refname, refname, obj.objtype, obj.docname, obj.node_id, 1)

+    def merge_domaindata(self, docnames, otherdata):
+        for name, obj in otherdata['objects'].items():
+            if obj.docname in docnames:
+                self.data['objects'][name] = obj
+

 def setup(app):
     app.add_domain(DBusDomain)

@@ -23,3 +23,8 @@ class FakeDBusDocDirective(Directive):
 def setup(app: Sphinx) -> Dict[str, Any]:
     """Register a fake dbus-doc directive with Sphinx"""
     app.add_directive("dbus-doc", FakeDBusDocDirective)
+
+    return dict(
+        parallel_read_safe = True,
+        parallel_write_safe = True
+    )

@@ -41,3 +41,8 @@ def setup(sphinx):
         sphinx.add_lexer('QMP', QMPExampleLexer)
     except errors.VersionRequirementError:
         sphinx.add_lexer('QMP', QMPExampleLexer())
+
+    return dict(
+        parallel_read_safe = True,
+        parallel_write_safe = True
+    )

@@ -191,12 +191,16 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
     if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
         (r->req.cmd.buf[1] & 0x01)) {
         page = r->req.cmd.buf[2];
-        if (page == 0xb0) {
+        if (page == 0xb0 && r->buflen >= 8) {
+            uint8_t buf[16] = {};
+            uint8_t buf_used = MIN(r->buflen, 16);
             uint64_t max_transfer = calculate_max_transfer(s);
-            stl_be_p(&r->buf[8], max_transfer);
-            /* Also take care of the opt xfer len. */
-            stl_be_p(&r->buf[12],
-                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
+
+            memcpy(buf, r->buf, buf_used);
+            stl_be_p(&buf[8], max_transfer);
+            stl_be_p(&buf[12], MIN_NON_ZERO(max_transfer, ldl_be_p(&buf[12])));
+            memcpy(r->buf + 8, buf + 8, buf_used - 8);
         } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
             /*
              * Now we're capable of supplying the VPD Block Limits

@@ -934,8 +934,11 @@ struct MemoryListener {
      * its @log_sync must be NULL. Vice versa.
      *
      * @listener: The #MemoryListener.
+     * @last_stage: The last stage to synchronize the log during migration.
+     * The caller should gurantee that the synchronization with true for
+     * @last_stage is triggered for once after all VCPUs have been stopped.
      */
-    void (*log_sync_global)(MemoryListener *listener);
+    void (*log_sync_global)(MemoryListener *listener, bool last_stage);

     /**
      * @log_clear:
@@ -2422,8 +2425,10 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
  * memory_global_dirty_log_sync: synchronize the dirty log for all memory
  *
  * Synchronizes the dirty page log for all address spaces.
+ *
+ * @last_stage: whether this is the last stage of live migration
  */
-void memory_global_dirty_log_sync(void);
+void memory_global_dirty_log_sync(bool last_stage);

 /**
  * memory_global_dirty_log_sync: synchronize the dirty log for all memory

@@ -115,6 +115,7 @@ struct KVMState
     } *as;
     uint64_t kvm_dirty_ring_bytes;  /* Size of the per-vcpu dirty ring */
     uint32_t kvm_dirty_ring_size;   /* Number of dirty GFNs per ring */
+    bool kvm_dirty_ring_with_bitmap;
     struct KVMDirtyRingReaper reaper;
     NotifyVmexitOption notify_vmexit;
     uint32_t notify_window;

meson (submodule, 1 line removed)

@@ -1 +0,0 @@
-Subproject commit 3a9b285a55b91b53b2acda987192274352ecb5be

File diff suppressed because it is too large.

@ -12,8 +12,6 @@ option('pkgversion', type : 'string', value : '',
description: 'use specified string as sub-version of the package') description: 'use specified string as sub-version of the package')
option('smbd', type : 'string', value : '', option('smbd', type : 'string', value : '',
description: 'Path to smbd for slirp networking') description: 'Path to smbd for slirp networking')
option('sphinx_build', type : 'string', value : 'sphinx-build',
description: 'Use specified sphinx-build for building document')
option('iasl', type : 'string', value : '', option('iasl', type : 'string', value : '',
description: 'Path to ACPI disassembler') description: 'Path to ACPI disassembler')
option('tls_priority', type : 'string', value : 'NORMAL', option('tls_priority', type : 'string', value : 'NORMAL',
@ -33,6 +31,9 @@ option('fuzzing_engine', type : 'string', value : '',
description: 'fuzzing engine library for OSS-Fuzz') description: 'fuzzing engine library for OSS-Fuzz')
option('trace_file', type: 'string', value: 'trace', option('trace_file', type: 'string', value: 'trace',
description: 'Trace file prefix for simple backend') description: 'Trace file prefix for simple backend')
option('coroutine_backend', type: 'combo',
choices: ['ucontext', 'sigaltstack', 'windows', 'auto'],
value: 'auto', description: 'coroutine backend to use')
# Everything else can be set via --enable/--disable-* option # Everything else can be set via --enable/--disable-* option
# on the configure script command line. After adding an option # on the configure script command line. After adding an option
@ -44,6 +45,8 @@ option('fuzzing', type : 'boolean', value: false,
description: 'build fuzzing targets') description: 'build fuzzing targets')
option('gettext', type : 'feature', value : 'auto', option('gettext', type : 'feature', value : 'auto',
description: 'Localization of the GTK+ user interface') description: 'Localization of the GTK+ user interface')
option('modules', type : 'feature', value : 'disabled',
description: 'modules support (non Windows)')
option('module_upgrades', type : 'boolean', value : false, option('module_upgrades', type : 'boolean', value : false,
description: 'try to load modules from alternate paths for upgrades') description: 'try to load modules from alternate paths for upgrades')
option('install_blobs', type : 'boolean', value : true, option('install_blobs', type : 'boolean', value : true,
@ -82,6 +85,14 @@ option('tcg', type: 'feature', value: 'enabled',
description: 'TCG support') description: 'TCG support')
option('tcg_interpreter', type: 'boolean', value: false, option('tcg_interpreter', type: 'boolean', value: false,
description: 'TCG with bytecode interpreter (slow)') description: 'TCG with bytecode interpreter (slow)')
option('safe_stack', type: 'boolean', value: false,
description: 'SafeStack Stack Smash Protection (requires clang/llvm and coroutine backend ucontext)')
option('sanitizers', type: 'boolean', value: false,
description: 'enable default sanitizers')
option('tsan', type: 'boolean', value: false,
description: 'enable thread sanitizer')
option('stack_protector', type: 'feature', value: 'auto',
description: 'compiler-provided stack protection')
option('cfi', type: 'boolean', value: false, option('cfi', type: 'boolean', value: false,
description: 'Control-Flow Integrity (CFI)') description: 'Control-Flow Integrity (CFI)')
option('cfi_debug', type: 'boolean', value: false, option('cfi_debug', type: 'boolean', value: false,
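
With these options now declared in meson_options.txt, the same knobs can be set either through configure or directly with Meson; a sketch using the option names from the hunk above (the configure spellings are the usual ones):

    ../configure --with-coroutine=sigaltstack --enable-modules --enable-stack-protector
    # or, for an already configured build tree:
    pyvenv/bin/meson configure . -Dcoroutine_backend=sigaltstack -Dmodules=enabled -Dstack_protector=enabled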

@@ -101,7 +101,7 @@ void global_dirty_log_change(unsigned int flag, bool start)
 static void global_dirty_log_sync(unsigned int flag, bool one_shot)
 {
     qemu_mutex_lock_iothread();
-    memory_global_dirty_log_sync();
+    memory_global_dirty_log_sync(false);
     if (one_shot) {
         memory_global_dirty_log_stop(flag);
     }
@@ -581,7 +581,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
      * skip it unconditionally and start dirty tracking
      * from 2'round of log sync
      */
-    memory_global_dirty_log_sync();
+    memory_global_dirty_log_sync(false);

     /*
      * reset page protect manually and unconditionally.


@ -1039,7 +1039,7 @@ static void migration_trigger_throttle(RAMState *rs)
} }
} }
static void migration_bitmap_sync(RAMState *rs) static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{ {
RAMBlock *block; RAMBlock *block;
int64_t end_time; int64_t end_time;
@ -1051,7 +1051,7 @@ static void migration_bitmap_sync(RAMState *rs)
} }
trace_migration_bitmap_sync_start(); trace_migration_bitmap_sync_start();
memory_global_dirty_log_sync(); memory_global_dirty_log_sync(last_stage);
qemu_mutex_lock(&rs->bitmap_mutex); qemu_mutex_lock(&rs->bitmap_mutex);
WITH_RCU_READ_LOCK_GUARD() { WITH_RCU_READ_LOCK_GUARD() {
@ -1086,7 +1086,7 @@ static void migration_bitmap_sync(RAMState *rs)
} }
} }
static void migration_bitmap_sync_precopy(RAMState *rs) static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{ {
Error *local_err = NULL; Error *local_err = NULL;
@ -1099,7 +1099,7 @@ static void migration_bitmap_sync_precopy(RAMState *rs)
local_err = NULL; local_err = NULL;
} }
migration_bitmap_sync(rs); migration_bitmap_sync(rs, last_stage);
if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) { if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
error_report_err(local_err); error_report_err(local_err);
@ -2699,7 +2699,7 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
RCU_READ_LOCK_GUARD(); RCU_READ_LOCK_GUARD();
/* This should be our last sync, the src is now paused */ /* This should be our last sync, the src is now paused */
migration_bitmap_sync(rs); migration_bitmap_sync(rs, false);
/* Easiest way to make sure we don't resume in the middle of a host-page */ /* Easiest way to make sure we don't resume in the middle of a host-page */
rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL; rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
@ -2890,7 +2890,7 @@ static void ram_init_bitmaps(RAMState *rs)
/* We don't use dirty log with background snapshots */ /* We don't use dirty log with background snapshots */
if (!migrate_background_snapshot()) { if (!migrate_background_snapshot()) {
memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
migration_bitmap_sync_precopy(rs); migration_bitmap_sync_precopy(rs, false);
} }
} }
qemu_mutex_unlock_ramlist(); qemu_mutex_unlock_ramlist();
@ -3214,7 +3214,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
WITH_RCU_READ_LOCK_GUARD() { WITH_RCU_READ_LOCK_GUARD() {
if (!migration_in_postcopy()) { if (!migration_in_postcopy()) {
migration_bitmap_sync_precopy(rs); migration_bitmap_sync_precopy(rs, true);
} }
ram_control_before_iterate(f, RAM_CONTROL_FINISH); ram_control_before_iterate(f, RAM_CONTROL_FINISH);
@ -3288,7 +3288,7 @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
if (!migration_in_postcopy() && remaining_size < s->threshold_size) { if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
qemu_mutex_lock_iothread(); qemu_mutex_lock_iothread();
WITH_RCU_READ_LOCK_GUARD() { WITH_RCU_READ_LOCK_GUARD() {
migration_bitmap_sync_precopy(rs); migration_bitmap_sync_precopy(rs, false);
} }
qemu_mutex_unlock_iothread(); qemu_mutex_unlock_iothread();
remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
@ -3523,7 +3523,7 @@ void colo_incoming_start_dirty_log(void)
qemu_mutex_lock_iothread(); qemu_mutex_lock_iothread();
qemu_mutex_lock_ramlist(); qemu_mutex_lock_ramlist();
memory_global_dirty_log_sync(); memory_global_dirty_log_sync(false);
WITH_RCU_READ_LOCK_GUARD() { WITH_RCU_READ_LOCK_GUARD() {
RAMBLOCK_FOREACH_NOT_IGNORED(block) { RAMBLOCK_FOREACH_NOT_IGNORED(block) {
ramblock_sync_dirty_bitmap(ram_state, block); ramblock_sync_dirty_bitmap(ram_state, block);
@ -3813,7 +3813,7 @@ void colo_flush_ram_cache(void)
void *src_host; void *src_host;
unsigned long offset = 0; unsigned long offset = 0;
memory_global_dirty_log_sync(); memory_global_dirty_log_sync(false);
qemu_mutex_lock(&ram_state->bitmap_mutex); qemu_mutex_lock(&ram_state->bitmap_mutex);
WITH_RCU_READ_LOCK_GUARD() { WITH_RCU_READ_LOCK_GUARD() {
RAMBLOCK_FOREACH_NOT_IGNORED(block) { RAMBLOCK_FOREACH_NOT_IGNORED(block) {


@ -9,14 +9,14 @@ help:
@echo "make check-minreqs:" @echo "make check-minreqs:"
@echo " Run tests in the minreqs virtual environment." @echo " Run tests in the minreqs virtual environment."
@echo " These tests use the oldest dependencies." @echo " These tests use the oldest dependencies."
@echo " Requires: Python 3.6" @echo " Requires: Python 3.7"
@echo " Hint (Fedora): 'sudo dnf install python3.6'" @echo " Hint (Fedora): 'sudo dnf install python3.7'"
@echo "" @echo ""
@echo "make check-tox:" @echo "make check-tox:"
@echo " Run tests against multiple python versions." @echo " Run tests against multiple python versions."
@echo " These tests use the newest dependencies." @echo " These tests use the newest dependencies."
@echo " Requires: Python 3.6 - 3.10, and tox." @echo " Requires: Python 3.7 - 3.11, and tox."
@echo " Hint (Fedora): 'sudo dnf install python3-tox python3.10'" @echo " Hint (Fedora): 'sudo dnf install python3-tox python3.11'"
@echo " The variable QEMU_TOX_EXTRA_ARGS can be use to pass extra" @echo " The variable QEMU_TOX_EXTRA_ARGS can be use to pass extra"
@echo " arguments to tox". @echo " arguments to tox".
@echo "" @echo ""
@ -54,18 +54,21 @@ pipenv check-pipenv:
@echo "pipenv was dropped; try 'make check-minreqs' or 'make min-venv'" @echo "pipenv was dropped; try 'make check-minreqs' or 'make min-venv'"
@exit 1 @exit 1
PIP_INSTALL = pip install --disable-pip-version-check
.PHONY: min-venv .PHONY: min-venv
min-venv: $(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate min-venv: $(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate
$(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate: setup.cfg tests/minreqs.txt $(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate: setup.cfg tests/minreqs.txt
@echo "VENV $(QEMU_MINVENV_DIR)" @echo "VENV $(QEMU_MINVENV_DIR)"
@python3.6 -m venv $(QEMU_MINVENV_DIR) @python3.7 -m venv $(QEMU_MINVENV_DIR)
@( \ @( \
echo "ACTIVATE $(QEMU_MINVENV_DIR)"; \ echo "ACTIVATE $(QEMU_MINVENV_DIR)"; \
. $(QEMU_MINVENV_DIR)/bin/activate; \ . $(QEMU_MINVENV_DIR)/bin/activate; \
echo "INSTALL wheel $(QEMU_MINVENV_DIR)"; \
$(PIP_INSTALL) wheel 1>/dev/null; \
echo "INSTALL -r tests/minreqs.txt $(QEMU_MINVENV_DIR)";\ echo "INSTALL -r tests/minreqs.txt $(QEMU_MINVENV_DIR)";\
pip install -r tests/minreqs.txt 1>/dev/null; \ $(PIP_INSTALL) -r tests/minreqs.txt 1>/dev/null; \
echo "INSTALL -e qemu $(QEMU_MINVENV_DIR)"; \ echo "INSTALL -e qemu $(QEMU_MINVENV_DIR)"; \
pip install -e . 1>/dev/null; \ $(PIP_INSTALL) -e . 1>/dev/null; \
) )
@touch $(QEMU_MINVENV_DIR) @touch $(QEMU_MINVENV_DIR)
@ -100,7 +103,7 @@ check-dev: dev-venv
.PHONY: develop .PHONY: develop
develop: develop:
pip3 install --disable-pip-version-check -e .[devel] $(PIP_INSTALL) -e .[devel]
.PHONY: check .PHONY: check
check: check:
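
The updated targets can be exercised locally; a short sketch of the two entry points mentioned in the help text above (Python 3.7 and tox need to be installed):

    cd python
    make check-minreqs    # venv with the oldest supported dependencies (python3.7)
    make check-tox        # newest dependencies across Python 3.7 - 3.11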

python/scripts/mkvenv.py (new file, 897 lines)

@@ -0,0 +1,897 @@
"""
mkvenv - QEMU pyvenv bootstrapping utility
usage: mkvenv [-h] command ...
QEMU pyvenv bootstrapping utility
options:
-h, --help show this help message and exit
Commands:
command Description
create create a venv
post_init
post-venv initialization
ensure Ensure that the specified package is installed.
--------------------------------------------------
usage: mkvenv create [-h] target
positional arguments:
target Target directory to install virtual environment into.
options:
-h, --help show this help message and exit
--------------------------------------------------
usage: mkvenv post_init [-h]
options:
-h, --help show this help message and exit
--------------------------------------------------
usage: mkvenv ensure [-h] [--online] [--dir DIR] dep_spec...
positional arguments:
dep_spec PEP 508 Dependency specification, e.g. 'meson>=0.61.5'
options:
-h, --help show this help message and exit
--online Install packages from PyPI, if necessary.
--dir DIR Path to vendored packages where we may install from.
"""
# Copyright (C) 2022-2023 Red Hat, Inc.
#
# Authors:
# John Snow <jsnow@redhat.com>
# Paolo Bonzini <pbonzini@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
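# Illustrative invocations (assumed; they mirror the usage text above and the
# way configure is expected to call this script):
#   python3 python/scripts/mkvenv.py create pyvenv
#   pyvenv/bin/python3 python/scripts/mkvenv.py ensure --dir python/wheels 'meson>=0.63.0'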
import argparse
from importlib.util import find_spec
import logging
import os
from pathlib import Path
import re
import shutil
import site
import subprocess
import sys
import sysconfig
from types import SimpleNamespace
from typing import (
Any,
Iterator,
Optional,
Sequence,
Tuple,
Union,
)
import venv
import warnings
# Try to load distlib, with a fallback to pip's vendored version.
# HAVE_DISTLIB is checked below, just-in-time, so that mkvenv does not fail
# outside the venv or before a potential call to ensurepip in checkpip().
HAVE_DISTLIB = True
try:
import distlib.database
import distlib.scripts
import distlib.version
except ImportError:
try:
# Reach into pip's cookie jar. pylint and flake8 don't understand
# that these imports will be used via distlib.xxx.
from pip._vendor import distlib
import pip._vendor.distlib.database # noqa, pylint: disable=unused-import
import pip._vendor.distlib.scripts # noqa, pylint: disable=unused-import
import pip._vendor.distlib.version # noqa, pylint: disable=unused-import
except ImportError:
HAVE_DISTLIB = False
# Do not add any mandatory dependencies from outside the stdlib:
# This script *must* be usable standalone!
DirType = Union[str, bytes, "os.PathLike[str]", "os.PathLike[bytes]"]
logger = logging.getLogger("mkvenv")
def inside_a_venv() -> bool:
"""Returns True if it is executed inside of a virtual environment."""
return sys.prefix != sys.base_prefix
class Ouch(RuntimeError):
"""An Exception class we can't confuse with a builtin."""
class QemuEnvBuilder(venv.EnvBuilder):
"""
An extension of venv.EnvBuilder for building QEMU's configure-time venv.
The primary difference is that it emulates a "nested" virtual
environment when invoked from inside of an existing virtual
environment by including packages from the parent. Also,
"ensurepip" is replaced if possible with just recreating pip's
console_scripts inside the virtual environment.
Parameters for base class init:
- system_site_packages: bool = False
- clear: bool = False
- symlinks: bool = False
- upgrade: bool = False
- with_pip: bool = False
- prompt: Optional[str] = None
- upgrade_deps: bool = False (Since 3.9)
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
logger.debug("QemuEnvBuilder.__init__(...)")
# For nested venv emulation:
self.use_parent_packages = False
if inside_a_venv():
# Include parent packages only if we're in a venv and
# system_site_packages was True.
self.use_parent_packages = kwargs.pop(
"system_site_packages", False
)
# Include system_site_packages only when the parent,
# i.e. the venv we are currently in, also does so.
kwargs["system_site_packages"] = sys.base_prefix in site.PREFIXES
# ensurepip is slow: venv creation can be very fast for cases where
# we allow the use of system_site_packages. Therefore, ensurepip is
# replaced with our own script generation once the virtual environment
# is setup.
self.want_pip = kwargs.get("with_pip", False)
if self.want_pip:
if (
kwargs.get("system_site_packages", False)
and not need_ensurepip()
):
kwargs["with_pip"] = False
else:
check_ensurepip(suggest_remedy=True)
super().__init__(*args, **kwargs)
# Make the context available post-creation:
self._context: Optional[SimpleNamespace] = None
def get_parent_libpath(self) -> Optional[str]:
"""Return the libpath of the parent venv, if applicable."""
if self.use_parent_packages:
return sysconfig.get_path("purelib")
return None
@staticmethod
def compute_venv_libpath(context: SimpleNamespace) -> str:
"""
Compatibility wrapper for context.lib_path for Python < 3.12
"""
# Python 3.12+, not strictly necessary because it's documented
# to be the same as 3.10 code below:
if sys.version_info >= (3, 12):
return context.lib_path
# Python 3.10+
if "venv" in sysconfig.get_scheme_names():
lib_path = sysconfig.get_path(
"purelib", scheme="venv", vars={"base": context.env_dir}
)
assert lib_path is not None
return lib_path
# For Python <= 3.9 we need to hardcode this. Fortunately the
# code below was the same in Python 3.6-3.10, so there is only
# one case.
if sys.platform == "win32":
return os.path.join(context.env_dir, "Lib", "site-packages")
return os.path.join(
context.env_dir,
"lib",
"python%d.%d" % sys.version_info[:2],
"site-packages",
)
def ensure_directories(self, env_dir: DirType) -> SimpleNamespace:
logger.debug("ensure_directories(env_dir=%s)", env_dir)
self._context = super().ensure_directories(env_dir)
return self._context
def create(self, env_dir: DirType) -> None:
logger.debug("create(env_dir=%s)", env_dir)
super().create(env_dir)
assert self._context is not None
self.post_post_setup(self._context)
def post_post_setup(self, context: SimpleNamespace) -> None:
"""
The final, final hook. Enter the venv and run commands inside of it.
"""
if self.use_parent_packages:
# We're inside of a venv and we want to include the parent
# venv's packages.
parent_libpath = self.get_parent_libpath()
assert parent_libpath is not None
logger.debug("parent_libpath: %s", parent_libpath)
our_libpath = self.compute_venv_libpath(context)
logger.debug("our_libpath: %s", our_libpath)
pth_file = os.path.join(our_libpath, "nested.pth")
with open(pth_file, "w", encoding="UTF-8") as file:
file.write(parent_libpath + os.linesep)
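# Note: the 'site' module reads *.pth files in site-packages at startup
# and appends each listed directory to sys.path, which is what makes the
# parent venv's packages importable from this nested venv.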
if self.want_pip:
args = [
context.env_exe,
__file__,
"post_init",
]
subprocess.run(args, check=True)
def get_value(self, field: str) -> str:
"""
Get a string value from the context namespace after a call to build.
For valid field names, see:
https://docs.python.org/3/library/venv.html#venv.EnvBuilder.ensure_directories
"""
ret = getattr(self._context, field)
assert isinstance(ret, str)
return ret
def need_ensurepip() -> bool:
"""
Tests for the presence of setuptools and pip.
:return: `True` if we do not detect both packages.
"""
# Don't try to actually import them, it's fraught with danger:
# https://github.com/pypa/setuptools/issues/2993
if find_spec("setuptools") and find_spec("pip"):
return False
return True
def check_ensurepip(prefix: str = "", suggest_remedy: bool = False) -> None:
"""
Check that we have ensurepip.
Raise a fatal exception with a helpful hint if it isn't available.
"""
if not find_spec("ensurepip"):
msg = (
"Python's ensurepip module is not found.\n"
"It's normally part of the Python standard library, "
"maybe your distribution packages it separately?\n"
"(Debian puts ensurepip in its python3-venv package.)\n"
)
if suggest_remedy:
msg += (
"Either install ensurepip, or alleviate the need for it in the"
" first place by installing pip and setuptools for "
f"'{sys.executable}'.\n"
)
raise Ouch(prefix + msg)
# ensurepip uses pyexpat, which can also go missing on us:
if not find_spec("pyexpat"):
msg = (
"Python's pyexpat module is not found.\n"
"It's normally part of the Python standard library, "
"maybe your distribution packages it separately?\n"
"(NetBSD's pkgsrc debundles this to e.g. 'py310-expat'.)\n"
)
if suggest_remedy:
msg += (
"Either install pyexpat, or alleviate the need for it in the "
"first place by installing pip and setuptools for "
f"'{sys.executable}'.\n"
)
raise Ouch(prefix + msg)
def make_venv( # pylint: disable=too-many-arguments
env_dir: Union[str, Path],
system_site_packages: bool = False,
clear: bool = True,
symlinks: Optional[bool] = None,
with_pip: bool = True,
) -> None:
"""
Create a venv using `QemuEnvBuilder`.
This is analogous to the `venv.create` module-level convenience
function that is part of the Python stdlib, except it uses
`QemuEnvBuilder` instead.
:param env_dir: The directory to create/install to.
:param system_site_packages:
Allow inheriting packages from the system installation.
:param clear: When True, fully remove any prior venv and files.
:param symlinks:
Whether to use symlinks to the target interpreter or not. If
left unspecified, it will use symlinks except on Windows to
match behavior with the "venv" CLI tool.
:param with_pip:
Whether to install "pip" binaries or not.
"""
logger.debug(
"%s: make_venv(env_dir=%s, system_site_packages=%s, "
"clear=%s, symlinks=%s, with_pip=%s)",
__file__,
str(env_dir),
system_site_packages,
clear,
symlinks,
with_pip,
)
if symlinks is None:
# Default behavior of standard venv CLI
symlinks = os.name != "nt"
builder = QemuEnvBuilder(
system_site_packages=system_site_packages,
clear=clear,
symlinks=symlinks,
with_pip=with_pip,
)
style = "non-isolated" if builder.system_site_packages else "isolated"
nested = ""
if builder.use_parent_packages:
nested = f"(with packages from '{builder.get_parent_libpath()}') "
print(
f"mkvenv: Creating {style} virtual environment"
f" {nested}at '{str(env_dir)}'",
file=sys.stderr,
)
try:
logger.debug("Invoking builder.create()")
try:
builder.create(str(env_dir))
except SystemExit as exc:
# Some versions of the venv module raise SystemExit; *nasty*!
# We want the exception that prompted it. It might be a subprocess
# error that has output we *really* want to see.
logger.debug("Intercepted SystemExit from EnvBuilder.create()")
raise exc.__cause__ or exc.__context__ or exc
logger.debug("builder.create() finished")
except subprocess.CalledProcessError as exc:
logger.error("mkvenv subprocess failed:")
logger.error("cmd: %s", exc.cmd)
logger.error("returncode: %d", exc.returncode)
def _stringify(data: Union[str, bytes]) -> str:
if isinstance(data, bytes):
return data.decode()
return data
lines = []
if exc.stdout:
lines.append("========== stdout ==========")
lines.append(_stringify(exc.stdout))
lines.append("============================")
if exc.stderr:
lines.append("========== stderr ==========")
lines.append(_stringify(exc.stderr))
lines.append("============================")
if lines:
logger.error(os.linesep.join(lines))
raise Ouch("VENV creation subprocess failed.") from exc
# print the python executable to stdout for configure.
print(builder.get_value("env_exe"))
def _gen_importlib(packages: Sequence[str]) -> Iterator[str]:
# pylint: disable=import-outside-toplevel
# pylint: disable=no-name-in-module
# pylint: disable=import-error
try:
# First preference: Python 3.8+ stdlib
from importlib.metadata import ( # type: ignore
PackageNotFoundError,
distribution,
)
except ImportError as exc:
logger.debug("%s", str(exc))
# Second preference: Commonly available PyPI backport
from importlib_metadata import ( # type: ignore
PackageNotFoundError,
distribution,
)
def _generator() -> Iterator[str]:
for package in packages:
try:
entry_points = distribution(package).entry_points
except PackageNotFoundError:
continue
# The EntryPoints type is only available in 3.10+,
# treat this as a vanilla list and filter it ourselves.
entry_points = filter(
lambda ep: ep.group == "console_scripts", entry_points
)
for entry_point in entry_points:
yield f"{entry_point.name} = {entry_point.value}"
return _generator()
def _gen_pkg_resources(packages: Sequence[str]) -> Iterator[str]:
# pylint: disable=import-outside-toplevel
# Bundled with setuptools; has a good chance of being available.
import pkg_resources
def _generator() -> Iterator[str]:
for package in packages:
try:
eps = pkg_resources.get_entry_map(package, "console_scripts")
except pkg_resources.DistributionNotFound:
continue
for entry_point in eps.values():
yield str(entry_point)
return _generator()
def generate_console_scripts(
packages: Sequence[str],
python_path: Optional[str] = None,
bin_path: Optional[str] = None,
) -> None:
"""
Generate script shims for console_script entry points in @packages.
"""
if python_path is None:
python_path = sys.executable
if bin_path is None:
bin_path = sysconfig.get_path("scripts")
assert bin_path is not None
logger.debug(
"generate_console_scripts(packages=%s, python_path=%s, bin_path=%s)",
packages,
python_path,
bin_path,
)
if not packages:
return
def _get_entry_points() -> Iterator[str]:
"""Python 3.7 compatibility shim for iterating entry points."""
# Python 3.8+, or Python 3.7 with importlib_metadata installed.
try:
return _gen_importlib(packages)
except ImportError as exc:
logger.debug("%s", str(exc))
# Python 3.7 with setuptools installed.
try:
return _gen_pkg_resources(packages)
except ImportError as exc:
logger.debug("%s", str(exc))
raise Ouch(
"Neither importlib.metadata nor pkg_resources found, "
"can't generate console script shims.\n"
"Use Python 3.8+, or install importlib-metadata or setuptools."
) from exc
maker = distlib.scripts.ScriptMaker(None, bin_path)
maker.variants = {""}
maker.clobber = False
for entry_point in _get_entry_points():
for filename in maker.make(entry_point):
logger.debug("wrote console_script '%s'", filename)
def checkpip() -> bool:
"""
Debian 10 has a pip that's broken when used inside of a virtual environment.
We try to detect and correct that case here.
"""
try:
# pylint: disable=import-outside-toplevel,unused-import,import-error
# pylint: disable=redefined-outer-name
import pip._internal # type: ignore # noqa: F401
logger.debug("pip appears to be working correctly.")
return False
except ModuleNotFoundError as exc:
if exc.name == "pip._internal":
# Uh, fair enough. They did say "internal".
# Let's just assume it's fine.
return False
logger.warning("pip appears to be malfunctioning: %s", str(exc))
check_ensurepip("pip appears to be non-functional, and ")
logger.debug("Attempting to repair pip ...")
subprocess.run(
(sys.executable, "-m", "ensurepip"),
stdout=subprocess.DEVNULL,
check=True,
)
logger.debug("Pip is now (hopefully) repaired!")
return True
def pkgname_from_depspec(dep_spec: str) -> str:
"""
Parse package name out of a PEP-508 depspec.
See https://peps.python.org/pep-0508/#names
"""
match = re.match(
r"^([A-Z0-9]([A-Z0-9._-]*[A-Z0-9])?)", dep_spec, re.IGNORECASE
)
if not match:
raise ValueError(
f"dep_spec '{dep_spec}'"
" does not appear to contain a valid package name"
)
return match.group(0)
def diagnose(
dep_spec: str,
online: bool,
wheels_dir: Optional[Union[str, Path]],
prog: Optional[str],
) -> Tuple[str, bool]:
"""
Offer a summary to the user as to why a package failed to be installed.
:param dep_spec: The package we tried to ensure, e.g. 'meson>=0.61.5'
:param online: Did we allow PyPI access?
:param prog:
Optionally, a shell program name that can be used as a
bellwether to detect if this program is installed elsewhere on
the system. This is used to offer advice when a program is
detected for a different python version.
:param wheels_dir:
Optionally, a directory that was searched for vendored packages.
"""
# pylint: disable=too-many-branches
# Some errors are not particularly serious
bad = False
pkg_name = pkgname_from_depspec(dep_spec)
pkg_version = None
has_importlib = False
try:
# Python 3.8+ stdlib
# pylint: disable=import-outside-toplevel
# pylint: disable=no-name-in-module
# pylint: disable=import-error
from importlib.metadata import ( # type: ignore
PackageNotFoundError,
version,
)
has_importlib = True
try:
pkg_version = version(pkg_name)
except PackageNotFoundError:
pass
except ModuleNotFoundError:
pass
lines = []
if pkg_version:
lines.append(
f"Python package '{pkg_name}' version '{pkg_version}' was found,"
" but isn't suitable."
)
elif has_importlib:
lines.append(
f"Python package '{pkg_name}' was not found nor installed."
)
else:
lines.append(
f"Python package '{pkg_name}' is either not found or"
" not a suitable version."
)
if wheels_dir:
lines.append(
"No suitable version found in, or failed to install from"
f" '{wheels_dir}'."
)
bad = True
if online:
lines.append("A suitable version could not be obtained from PyPI.")
bad = True
else:
lines.append(
"mkvenv was configured to operate offline and did not check PyPI."
)
if prog and not pkg_version:
which = shutil.which(prog)
if which:
if sys.base_prefix in site.PREFIXES:
pypath = Path(sys.executable).resolve()
lines.append(
f"'{prog}' was detected on your system at '{which}', "
f"but the Python package '{pkg_name}' was not found by "
f"this Python interpreter ('{pypath}'). "
f"Typically this means that '{prog}' has been installed "
"against a different Python interpreter on your system."
)
else:
lines.append(
f"'{prog}' was detected on your system at '{which}', "
"but the build is using an isolated virtual environment."
)
bad = True
lines = [f"{line}" for line in lines]
if bad:
lines.insert(0, f"Could not provide build dependency '{dep_spec}':")
else:
lines.insert(0, f"'{dep_spec}' not found:")
return os.linesep.join(lines), bad
def pip_install(
args: Sequence[str],
online: bool = False,
wheels_dir: Optional[Union[str, Path]] = None,
) -> None:
"""
Use pip to install a package or package(s) as specified in @args.
"""
loud = bool(
os.environ.get("DEBUG")
or os.environ.get("GITLAB_CI")
or os.environ.get("V")
)
full_args = [
sys.executable,
"-m",
"pip",
"install",
"--disable-pip-version-check",
"-v" if loud else "-q",
]
if not online:
full_args += ["--no-index"]
if wheels_dir:
full_args += ["--find-links", f"file://{str(wheels_dir)}"]
full_args += list(args)
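# e.g. an offline install from a wheels directory ends up running roughly:
#   <python> -m pip install --disable-pip-version-check -q \
#       --no-index --find-links file://<wheels_dir> <dep_spec>...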
subprocess.run(
full_args,
check=True,
)
def _do_ensure(
dep_specs: Sequence[str],
online: bool = False,
wheels_dir: Optional[Union[str, Path]] = None,
) -> None:
"""
Use pip to ensure we have the package specified by @dep_specs.
If the package is already installed, do nothing. If online and
wheels_dir are both provided, prefer packages found in wheels_dir
first before connecting to PyPI.
:param dep_specs:
PEP 508 dependency specifications. e.g. ['meson>=0.61.5'].
:param online: If True, fall back to PyPI.
:param wheels_dir: If specified, search this path for packages.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", category=UserWarning, module="distlib"
)
dist_path = distlib.database.DistributionPath(include_egg=True)
absent = []
present = []
for spec in dep_specs:
matcher = distlib.version.LegacyMatcher(spec)
dist = dist_path.get_distribution(matcher.name)
if dist is None or not matcher.match(dist.version):
absent.append(spec)
else:
logger.info("found %s", dist)
present.append(matcher.name)
if present:
generate_console_scripts(present)
if absent:
# Some packages are missing or aren't a suitable version,
# install a suitable (possibly vendored) package.
print(f"mkvenv: installing {', '.join(absent)}", file=sys.stderr)
pip_install(args=absent, online=online, wheels_dir=wheels_dir)
def ensure(
dep_specs: Sequence[str],
online: bool = False,
wheels_dir: Optional[Union[str, Path]] = None,
prog: Optional[str] = None,
) -> None:
"""
Use pip to ensure we have the package specified by @dep_specs.
If the package is already installed, do nothing. If online and
wheels_dir are both provided, prefer packages found in wheels_dir
first before connecting to PyPI.
:param dep_specs:
PEP 508 dependency specifications. e.g. ['meson>=0.61.5'].
:param online: If True, fall back to PyPI.
:param wheels_dir: If specified, search this path for packages.
:param prog:
If specified, use this program name for error diagnostics that will
be presented to the user. e.g., 'sphinx-build' can be used as a
bellwether for the presence of 'sphinx'.
"""
print(f"mkvenv: checking for {', '.join(dep_specs)}", file=sys.stderr)
if not HAVE_DISTLIB:
raise Ouch("a usable distlib could not be found, please install it")
try:
_do_ensure(dep_specs, online, wheels_dir)
except subprocess.CalledProcessError as exc:
# Well, that's not good.
msg, bad = diagnose(dep_specs[0], online, wheels_dir, prog)
if bad:
raise Ouch(msg) from exc
raise SystemExit(f"\n{msg}\n\n") from exc
def post_venv_setup() -> None:
"""
This is intended to be run *inside the venv* after it is created.
"""
logger.debug("post_venv_setup()")
# Test for a broken pip (Debian 10 or derivative?) and fix it if needed
if not checkpip():
# Finally, generate a 'pip' script so the venv is usable in a normal
# way from the CLI. This only happens when we inherited pip from a
# parent/system-site and haven't run ensurepip in some way.
generate_console_scripts(["pip"])
def _add_create_subcommand(subparsers: Any) -> None:
subparser = subparsers.add_parser("create", help="create a venv")
subparser.add_argument(
"target",
type=str,
action="store",
help="Target directory to install virtual environment into.",
)
def _add_post_init_subcommand(subparsers: Any) -> None:
subparsers.add_parser("post_init", help="post-venv initialization")
def _add_ensure_subcommand(subparsers: Any) -> None:
subparser = subparsers.add_parser(
"ensure", help="Ensure that the specified package is installed."
)
subparser.add_argument(
"--online",
action="store_true",
help="Install packages from PyPI, if necessary.",
)
subparser.add_argument(
"--dir",
type=str,
action="store",
help="Path to vendored packages where we may install from.",
)
subparser.add_argument(
"--diagnose",
type=str,
action="store",
help=(
"Name of a shell utility to use for "
"diagnostics if this command fails."
),
)
subparser.add_argument(
"dep_specs",
type=str,
action="store",
help="PEP 508 Dependency specification, e.g. 'meson>=0.61.5'",
nargs="+",
)
def main() -> int:
"""CLI interface to make_qemu_venv. See module docstring."""
if os.environ.get("DEBUG") or os.environ.get("GITLAB_CI"):
# You're welcome.
logging.basicConfig(level=logging.DEBUG)
else:
if os.environ.get("V"):
logging.basicConfig(level=logging.INFO)
# These are incredibly noisy even for V=1
logging.getLogger("distlib.metadata").addFilter(lambda record: False)
logging.getLogger("distlib.database").addFilter(lambda record: False)
parser = argparse.ArgumentParser(
prog="mkvenv",
description="QEMU pyvenv bootstrapping utility",
)
subparsers = parser.add_subparsers(
title="Commands",
dest="command",
required=True,
metavar="command",
help="Description",
)
_add_create_subcommand(subparsers)
_add_post_init_subcommand(subparsers)
_add_ensure_subcommand(subparsers)
args = parser.parse_args()
try:
if args.command == "create":
make_venv(
args.target,
system_site_packages=True,
clear=True,
)
if args.command == "post_init":
post_venv_setup()
if args.command == "ensure":
ensure(
dep_specs=args.dep_specs,
online=args.online,
wheels_dir=args.dir,
prog=args.diagnose,
)
logger.debug("mkvenv.py %s: exiting", args.command)
except Ouch as exc:
print("\n*** Ouch! ***\n", file=sys.stderr)
print(str(exc), "\n\n", file=sys.stderr)
return 1
except SystemExit:
raise
except: # pylint: disable=bare-except
logger.exception("mkvenv did not complete successfully:")
return 2
return 0
if __name__ == "__main__":
sys.exit(main())

python/scripts/vendor.py (new executable file)

@ -0,0 +1,74 @@
#!/usr/bin/env python3
"""
vendor - QEMU python vendoring utility
usage: vendor [-h]
QEMU python vendoring utility
options:
-h, --help show this help message and exit
"""
# Copyright (C) 2023 Red Hat, Inc.
#
# Authors:
# John Snow <jsnow@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
import argparse
import os
from pathlib import Path
import subprocess
import sys
import tempfile
def main() -> int:
"""Run the vendoring utility. See module-level docstring."""
loud = False
if os.environ.get("DEBUG") or os.environ.get("V"):
loud = True
# No options or anything for now, but I guess
# you'll figure that out when you run --help.
parser = argparse.ArgumentParser(
prog="vendor",
description="QEMU python vendoring utility",
)
parser.parse_args()
packages = {
"meson==0.63.3":
"d677b809c4895dcbaac9bf6c43703fcb3609a4b24c6057c78f828590049cf43a",
}
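# To add or update a vendored wheel (illustrative workflow), add its PEP 508
# spec and sha256 above, then re-run this script to refresh python/wheels/.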
vendor_dir = Path(__file__, "..", "..", "wheels").resolve()
with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as file:
for dep_spec, checksum in packages.items():
file.write(f"{dep_spec} --hash=sha256:{checksum}")
file.flush()
cli_args = [
"pip",
"download",
"--dest",
str(vendor_dir),
"--require-hashes",
"-r",
file.name,
]
if loud:
cli_args.append("-v")
print(" ".join(cli_args))
subprocess.run(cli_args, check=True)
return 0
if __name__ == "__main__":
sys.exit(main())


@ -14,7 +14,6 @@ classifiers =
Natural Language :: English Natural Language :: English
Operating System :: OS Independent Operating System :: OS Independent
Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.9
@ -23,7 +22,7 @@ classifiers =
Typing :: Typed Typing :: Typed
[options] [options]
python_requires = >= 3.6 python_requires = >= 3.7
packages = packages =
qemu.qmp qemu.qmp
qemu.machine qemu.machine
@ -36,11 +35,12 @@ packages =
# Remember to update tests/minreqs.txt if changing anything below: # Remember to update tests/minreqs.txt if changing anything below:
devel = devel =
avocado-framework >= 90.0 avocado-framework >= 90.0
flake8 >= 3.6.0 distlib >= 0.3.6
flake8 >= 5.0.4
fusepy >= 2.0.4 fusepy >= 2.0.4
isort >= 5.1.2 isort >= 5.1.2
mypy >= 0.780 mypy >= 0.780
pylint >= 2.8.0 pylint >= 2.17.3
tox >= 3.18.0 tox >= 3.18.0
urwid >= 2.1.2 urwid >= 2.1.2
urwid-readline >= 0.13 urwid-readline >= 0.13
@ -76,7 +76,7 @@ exclude = __pycache__,
[mypy] [mypy]
strict = True strict = True
python_version = 3.6 python_version = 3.7
warn_unused_configs = True warn_unused_configs = True
namespace_packages = True namespace_packages = True
warn_unused_ignores = False warn_unused_ignores = False
@ -103,6 +103,39 @@ ignore_missing_imports = True
[mypy-pygments] [mypy-pygments]
ignore_missing_imports = True ignore_missing_imports = True
[mypy-importlib.metadata]
ignore_missing_imports = True
[mypy-importlib_metadata]
ignore_missing_imports = True
[mypy-pkg_resources]
ignore_missing_imports = True
[mypy-distlib]
ignore_missing_imports = True
[mypy-distlib.database]
ignore_missing_imports = True
[mypy-distlib.scripts]
ignore_missing_imports = True
[mypy-distlib.version]
ignore_missing_imports = True
[mypy-pip._vendor.distlib]
ignore_missing_imports = True
[mypy-pip._vendor.distlib.database]
ignore_missing_imports = True
[mypy-pip._vendor.distlib.scripts]
ignore_missing_imports = True
[mypy-pip._vendor.distlib.version]
ignore_missing_imports = True
[pylint.messages control] [pylint.messages control]
# Disable the message, report, category or checker with the given id(s). You # Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this # can either give multiple identifiers separated by comma (,) or put this
@ -132,6 +165,7 @@ good-names=i,
fd, # fd = os.open(...) fd, # fd = os.open(...)
c, # for c in string: ... c, # for c in string: ...
T, # for TypeVars. See pylint#3401 T, # for TypeVars. See pylint#3401
SocketAddrT, # Not sure why this is invalid.
[pylint.similarities] [pylint.similarities]
# Ignore imports when computing similarities. # Ignore imports when computing similarities.
@ -158,7 +192,7 @@ multi_line_output=3
# of python available on your system to run this test. # of python available on your system to run this test.
[tox:tox] [tox:tox]
envlist = py36, py37, py38, py39, py310, py311 envlist = py37, py38, py39, py310, py311
skip_missing_interpreters = true skip_missing_interpreters = true
[testenv] [testenv]


@ -1,2 +1,3 @@
#!/bin/sh -e #!/bin/sh -e
python3 -m flake8 qemu/ python3 -m flake8 qemu/
python3 -m flake8 scripts/


@ -1,2 +1,3 @@
#!/bin/sh -e #!/bin/sh -e
python3 -m isort -c qemu/ python3 -m isort -c qemu/
python3 -m isort -c scripts/


@ -1,5 +1,5 @@
# This file lists the ***oldest possible dependencies*** needed to run # This file lists the ***oldest possible dependencies*** needed to run
# "make check" successfully under ***Python 3.6***. It is used primarily # "make check" successfully under ***Python 3.7***. It is used primarily
# by GitLab CI to ensure that our stated minimum versions in setup.cfg # by GitLab CI to ensure that our stated minimum versions in setup.cfg
# are truthful and regularly validated. # are truthful and regularly validated.
# #
@ -16,6 +16,9 @@ urwid==2.1.2
urwid-readline==0.13 urwid-readline==0.13
Pygments==2.9.0 Pygments==2.9.0
# Dependencies for mkvenv
distlib==0.3.6
# Dependencies for FUSE support for qom-fuse # Dependencies for FUSE support for qom-fuse
fusepy==2.0.4 fusepy==2.0.4
@ -23,23 +26,23 @@ fusepy==2.0.4
avocado-framework==90.0 avocado-framework==90.0
# Linters # Linters
flake8==3.6.0 flake8==5.0.4
isort==5.1.2 isort==5.1.2
mypy==0.780 mypy==0.780
pylint==2.8.0 pylint==2.17.3
# Transitive flake8 dependencies # Transitive flake8 dependencies
mccabe==0.6.0 mccabe==0.7.0
pycodestyle==2.4.0 pycodestyle==2.9.1
pyflakes==2.0.0 pyflakes==2.5.0
# Transitive mypy dependencies # Transitive mypy dependencies
mypy-extensions==0.4.3 mypy-extensions==0.4.3
typed-ast==1.4.0 typed-ast==1.4.0
typing-extensions==3.7.4 typing-extensions==4.5.0
# Transitive pylint dependencies # Transitive pylint dependencies
astroid==2.5.4 astroid==2.15.4
lazy-object-proxy==1.4.0 lazy-object-proxy==1.4.0
toml==0.10.0 toml==0.10.0
wrapt==1.12.1 wrapt==1.12.1


@ -1,2 +1,3 @@
#!/bin/sh -e #!/bin/sh -e
python3 -m mypy -p qemu python3 -m mypy -p qemu
python3 -m mypy scripts/


@ -1,3 +1,4 @@
#!/bin/sh -e #!/bin/sh -e
# See commit message for environment variable explainer. # See commit message for environment variable explainer.
SETUPTOOLS_USE_DISTUTILS=stdlib python3 -m pylint qemu/ SETUPTOOLS_USE_DISTUTILS=stdlib python3 -m pylint qemu/
SETUPTOOLS_USE_DISTUTILS=stdlib python3 -m pylint scripts/

Binary file not shown.


@ -22,7 +22,7 @@ have_qga_vss = get_option('qga_vss') \
Then run configure with: --extra-cxxflags="-isystem /path/to/vss/inc/win2003"''') \ Then run configure with: --extra-cxxflags="-isystem /path/to/vss/inc/win2003"''') \
.require(midl.found() or widl.found(), .require(midl.found() or widl.found(),
error_message: 'VSS support requires midl or widl') \ error_message: 'VSS support requires midl or widl') \
.require(not enable_static, .require(not get_option('prefer_static'),
error_message: 'VSS support requires dynamic linking with GLib') \ error_message: 'VSS support requires dynamic linking with GLib') \
.allowed() .allowed()


@ -4,7 +4,7 @@
# KVM and x86_64, or tests that are generic enough to be valid for all # KVM and x86_64, or tests that are generic enough to be valid for all
# targets. Such a test list can be generated with: # targets. Such a test list can be generated with:
# #
# ./tests/venv/bin/avocado list --filter-by-tags-include-empty \ # ./pyvenv/bin/avocado list --filter-by-tags-include-empty \
# --filter-by-tags-include-empty-key -t accel:kvm,arch:x86_64 \ # --filter-by-tags-include-empty-key -t accel:kvm,arch:x86_64 \
# tests/avocado/ # tests/avocado/
# #
@ -22,7 +22,7 @@
# - tests/avocado/virtio_check_params.py:VirtioMaxSegSettingsCheck.test_machine_types # - tests/avocado/virtio_check_params.py:VirtioMaxSegSettingsCheck.test_machine_types
# #
make get-vm-images make get-vm-images
./tests/venv/bin/avocado run \ ./pyvenv/bin/avocado run \
--job-results-dir=tests/results/ \ --job-results-dir=tests/results/ \
tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_i440fx_kvm \ tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_i440fx_kvm \
tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_q35_kvm \ tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_q35_kvm \


@ -24,6 +24,9 @@ hppa
i386 i386
~ (/qemu)?((/include)?/hw/i386/.*|/target/i386/.*|/hw/intc/[^/]*apic[^/]*\.c) ~ (/qemu)?((/include)?/hw/i386/.*|/target/i386/.*|/hw/intc/[^/]*apic[^/]*\.c)
loongarch
~ (/qemu)?((/include)?/hw/(loongarch/.*|.*/loongarch.*)|/target/loongarch/.*)
m68k m68k
~ (/qemu)?((/include)?/hw/m68k/.*|/target/m68k/.*|(/include)?/hw(/.*)?/mcf.*|(/include)?/hw/nubus/.*) ~ (/qemu)?((/include)?/hw/m68k/.*|/target/m68k/.*|(/include)?/hw(/.*)?/mcf.*|(/include)?/hw/nubus/.*)
@ -36,11 +39,14 @@ mips
nios2 nios2
~ (/qemu)?((/include)?/hw/nios2/.*|/target/nios2/.*) ~ (/qemu)?((/include)?/hw/nios2/.*|/target/nios2/.*)
openrisc
~ (/qemu)?((/include)?/hw/openrisc/.*|/target/openrisc/.*)
ppc ppc
~ (/qemu)?((/include)?/hw/ppc/.*|/target/ppc/.*|/hw/pci-host/(uninorth.*|dec.*|prep.*|ppc.*)|/hw/misc/macio/.*|(/include)?/hw/.*/(xics|openpic|spapr).*) ~ (/qemu)?((/include)?/hw/ppc/.*|/target/ppc/.*|/hw/pci-host/(uninorth.*|dec.*|prep.*|ppc.*)|/hw/misc/macio/.*|(/include)?/hw/.*/(xics|openpic|spapr).*)
riscv riscv
~ (/qemu)?((/include)?/hw/riscv/.*|/target/riscv/.*) ~ (/qemu)?((/include)?/hw/riscv/.*|/target/riscv/.*|/hw/.*/(riscv_|ibex_|sifive_).*)
rx rx
~ (/qemu)?((/include)?/hw/rx/.*|/target/rx/.*) ~ (/qemu)?((/include)?/hw/rx/.*|/target/rx/.*)
@ -54,12 +60,12 @@ sh4
sparc sparc
~ (/qemu)?((/include)?/hw/sparc(64)?.*|/target/sparc/.*|/hw/.*/grlib.*|/hw/display/cg3.c) ~ (/qemu)?((/include)?/hw/sparc(64)?.*|/target/sparc/.*|/hw/.*/grlib.*|/hw/display/cg3.c)
tilegx
~ (/qemu)?(/target/tilegx/.*)
tricore tricore
~ (/qemu)?((/include)?/hw/tricore/.*|/target/tricore/.*) ~ (/qemu)?((/include)?/hw/tricore/.*|/target/tricore/.*)
xtensa
~ (/qemu)?((/include)?/hw/xtensa/.*|/target/xtensa/.*)
9pfs 9pfs
~ (/qemu)?(/hw/9pfs/.*|/fsdev/.*) ~ (/qemu)?(/hw/9pfs/.*|/fsdev/.*)
@ -73,7 +79,7 @@ char
~ (/qemu)?(/qemu-char\.c|/include/sysemu/char\.h|(/include)?/hw/char/.*) ~ (/qemu)?(/qemu-char\.c|/include/sysemu/char\.h|(/include)?/hw/char/.*)
crypto crypto
~ (/qemu)?((/include)?/crypto/.*|/hw/.*/crypto.*) ~ (/qemu)?((/include)?/crypto/.*|/hw/.*/.*crypto.*|(/include/sysemu|/backends)/cryptodev.*)
disas disas
~ (/qemu)?((/include)?/disas.*) ~ (/qemu)?((/include)?/disas.*)
@ -100,7 +106,7 @@ net
~ (/qemu)?((/include)?(/hw)?/(net|rdma)/.*) ~ (/qemu)?((/include)?(/hw)?/(net|rdma)/.*)
pci pci
~ (/qemu)?(/hw/pci.*|/include/hw/pci.*) ~ (/qemu)?(/include)?/hw/(cxl/|pci).*
qemu-ga qemu-ga
~ (/qemu)?(/qga/.*) ~ (/qemu)?(/qga/.*)
@ -108,9 +114,6 @@ qemu-ga
scsi scsi
~ (/qemu)?(/scsi/.*|/hw/scsi/.*|/include/hw/scsi/.*) ~ (/qemu)?(/scsi/.*|/hw/scsi/.*|/include/hw/scsi/.*)
tcg
~ (/qemu)?(/accel/tcg/.*|/replay/.*|/(.*/)?softmmu.*)
trace trace
~ (/qemu)?(/.*trace.*\.[ch]) ~ (/qemu)?(/.*trace.*\.[ch])
@ -126,9 +129,27 @@ user
util util
~ (/qemu)?(/util/.*|/include/qemu/.*) ~ (/qemu)?(/util/.*|/include/qemu/.*)
vfio
~ (/qemu)?(/include)?/hw/vfio/.*
virtio
~ (/qemu)?(/include)?/hw/virtio/.*
xen xen
~ (/qemu)?(.*/xen.*) ~ (/qemu)?(.*/xen.*)
hvf
~ (/qemu)?(.*/hvf.*)
kvm
~ (/qemu)?(.*/kvm.*)
tcg
~ (/qemu)?(/accel/tcg|/replay|/tcg)/.*
sysemu
~ (/qemu)?(/softmmu/.*|/accel/.*)
(headers) (headers)
~ (/qemu)?(/include/.*) ~ (/qemu)?(/include/.*)
@ -137,9 +158,3 @@ testlibs
tests tests
~ (/qemu)?(/tests/.*) ~ (/qemu)?(/tests/.*)
loongarch
~ (/qemu)?((/include)?/hw/(loongarch/.*|.*/loongarch.*)|/target/loongarch/.*)
riscv
~ (/qemu)?((/include)?/hw/riscv/.*|/target/riscv/.*|/hw/.*/(riscv_|ibex_|sifive_).*)


@ -43,7 +43,7 @@ except ModuleNotFoundError as exc:
print(f"Module '{exc.name}' not found.") print(f"Module '{exc.name}' not found.")
print(" Try 'make check-venv' from your build directory,") print(" Try 'make check-venv' from your build directory,")
print(" and then one way to run this script is like so:") print(" and then one way to run this script is like so:")
print(f' > $builddir/tests/venv/bin/python3 "{path}"') print(f' > $builddir/pyvenv/bin/python3 "{path}"')
sys.exit(1) sys.exit(1)
logger = logging.getLogger('device-crash-test') logger = logging.getLogger('device-crash-test')


@ -35,6 +35,8 @@ SKIP_OPTIONS = {
OPTION_NAMES = { OPTION_NAMES = {
"b_coverage": "gcov", "b_coverage": "gcov",
"b_lto": "lto", "b_lto": "lto",
"coroutine_backend": "with-coroutine",
"debug": "debug-info",
"malloc": "enable-malloc", "malloc": "enable-malloc",
"pkgversion": "with-pkgversion", "pkgversion": "with-pkgversion",
"qemu_firmwarepath": "firmwarepath", "qemu_firmwarepath": "firmwarepath",
@ -46,6 +48,7 @@ BUILTIN_OPTIONS = {
"b_coverage", "b_coverage",
"b_lto", "b_lto",
"datadir", "datadir",
"debug",
"includedir", "includedir",
"libdir", "libdir",
"libexecdir", "libexecdir",


@ -1,8 +1,8 @@
# This file is generated by meson-buildoptions.py, do not edit! # This file is generated by meson-buildoptions.py, do not edit!
meson_options_help() { meson_options_help() {
printf "%s\n" ' --audio-drv-list=CHOICES Set audio driver list [default] (choices: al' printf "%s\n" ' --audio-drv-list=CHOICES Set audio driver list [default] (choices: alsa/co'
printf "%s\n" ' sa/coreaudio/default/dsound/jack/oss/pa/' printf "%s\n" ' reaudio/default/dsound/jack/oss/pa/pipewire/sdl/s'
printf "%s\n" ' pipewire/sdl/sndio)' printf "%s\n" ' ndio)'
printf "%s\n" ' --block-drv-ro-whitelist=VALUE' printf "%s\n" ' --block-drv-ro-whitelist=VALUE'
printf "%s\n" ' set block driver read-only whitelist (by default' printf "%s\n" ' set block driver read-only whitelist (by default'
printf "%s\n" ' affects only QEMU, not tools like qemu-img)' printf "%s\n" ' affects only QEMU, not tools like qemu-img)'
@ -11,6 +11,7 @@ meson_options_help() {
printf "%s\n" ' affects only QEMU, not tools like qemu-img)' printf "%s\n" ' affects only QEMU, not tools like qemu-img)'
printf "%s\n" ' --datadir=VALUE Data file directory [share]' printf "%s\n" ' --datadir=VALUE Data file directory [share]'
printf "%s\n" ' --disable-coroutine-pool coroutine freelist (better performance)' printf "%s\n" ' --disable-coroutine-pool coroutine freelist (better performance)'
printf "%s\n" ' --disable-debug-info Enable debug symbols and other information'
printf "%s\n" ' --disable-hexagon-idef-parser' printf "%s\n" ' --disable-hexagon-idef-parser'
printf "%s\n" ' use idef-parser to automatically generate TCG' printf "%s\n" ' use idef-parser to automatically generate TCG'
printf "%s\n" ' code for the Hexagon frontend' printf "%s\n" ' code for the Hexagon frontend'
@ -41,11 +42,15 @@ meson_options_help() {
printf "%s\n" ' --enable-profiler profiler support' printf "%s\n" ' --enable-profiler profiler support'
printf "%s\n" ' --enable-rng-none dummy RNG, avoid using /dev/(u)random and' printf "%s\n" ' --enable-rng-none dummy RNG, avoid using /dev/(u)random and'
printf "%s\n" ' getrandom()' printf "%s\n" ' getrandom()'
printf "%s\n" ' --enable-safe-stack SafeStack Stack Smash Protection (requires'
printf "%s\n" ' clang/llvm and coroutine backend ucontext)'
printf "%s\n" ' --enable-sanitizers enable default sanitizers'
printf "%s\n" ' --enable-strip Strip targets on install' printf "%s\n" ' --enable-strip Strip targets on install'
printf "%s\n" ' --enable-tcg-interpreter TCG with bytecode interpreter (slow)' printf "%s\n" ' --enable-tcg-interpreter TCG with bytecode interpreter (slow)'
printf "%s\n" ' --enable-trace-backends=CHOICES' printf "%s\n" ' --enable-trace-backends=CHOICES'
printf "%s\n" ' Set available tracing backends [log] (choices:' printf "%s\n" ' Set available tracing backends [log] (choices:'
printf "%s\n" ' dtrace/ftrace/log/nop/simple/syslog/ust)' printf "%s\n" ' dtrace/ftrace/log/nop/simple/syslog/ust)'
printf "%s\n" ' --enable-tsan enable thread sanitizer'
printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-' printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-'
printf "%s\n" ' firmware]' printf "%s\n" ' firmware]'
printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler' printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler'
@ -57,11 +62,11 @@ meson_options_help() {
printf "%s\n" ' --localedir=VALUE Locale data directory [share/locale]' printf "%s\n" ' --localedir=VALUE Locale data directory [share/locale]'
printf "%s\n" ' --localstatedir=VALUE Localstate data directory [/var/local]' printf "%s\n" ' --localstatedir=VALUE Localstate data directory [/var/local]'
printf "%s\n" ' --mandir=VALUE Manual page directory [share/man]' printf "%s\n" ' --mandir=VALUE Manual page directory [share/man]'
printf "%s\n" ' --sphinx-build=VALUE Use specified sphinx-build for building document'
printf "%s\n" ' [sphinx-build]'
printf "%s\n" ' --sysconfdir=VALUE Sysconf data directory [etc]' printf "%s\n" ' --sysconfdir=VALUE Sysconf data directory [etc]'
printf "%s\n" ' --tls-priority=VALUE Default TLS protocol/cipher priority string' printf "%s\n" ' --tls-priority=VALUE Default TLS protocol/cipher priority string'
printf "%s\n" ' [NORMAL]' printf "%s\n" ' [NORMAL]'
printf "%s\n" ' --with-coroutine=CHOICE coroutine backend to use (choices:'
printf "%s\n" ' auto/sigaltstack/ucontext/windows)'
printf "%s\n" ' --with-pkgversion=VALUE use specified string as sub-version of the' printf "%s\n" ' --with-pkgversion=VALUE use specified string as sub-version of the'
printf "%s\n" ' package' printf "%s\n" ' package'
printf "%s\n" ' --with-trace-file=VALUE Trace file prefix for simple backend [trace]' printf "%s\n" ' --with-trace-file=VALUE Trace file prefix for simple backend [trace]'
@ -129,6 +134,7 @@ meson_options_help() {
printf "%s\n" ' lzo lzo compression support' printf "%s\n" ' lzo lzo compression support'
printf "%s\n" ' malloc-trim enable libc malloc_trim() for memory optimization' printf "%s\n" ' malloc-trim enable libc malloc_trim() for memory optimization'
printf "%s\n" ' membarrier membarrier system call (for Linux 4.14+ or Windows' printf "%s\n" ' membarrier membarrier system call (for Linux 4.14+ or Windows'
printf "%s\n" ' modules modules support (non Windows)'
printf "%s\n" ' mpath Multipath persistent reservation passthrough' printf "%s\n" ' mpath Multipath persistent reservation passthrough'
printf "%s\n" ' multiprocess Out of process device emulation support' printf "%s\n" ' multiprocess Out of process device emulation support'
printf "%s\n" ' netmap netmap network backend support' printf "%s\n" ' netmap netmap network backend support'
@ -160,6 +166,7 @@ meson_options_help() {
printf "%s\n" ' sparse sparse checker' printf "%s\n" ' sparse sparse checker'
printf "%s\n" ' spice Spice server support' printf "%s\n" ' spice Spice server support'
printf "%s\n" ' spice-protocol Spice protocol support' printf "%s\n" ' spice-protocol Spice protocol support'
printf "%s\n" ' stack-protector compiler-provided stack protection'
printf "%s\n" ' tcg TCG support' printf "%s\n" ' tcg TCG support'
printf "%s\n" ' tools build support utilities that come with QEMU' printf "%s\n" ' tools build support utilities that come with QEMU'
printf "%s\n" ' tpm TPM support' printf "%s\n" ' tpm TPM support'
@ -247,6 +254,7 @@ _meson_option_parse() {
--disable-cocoa) printf "%s" -Dcocoa=disabled ;; --disable-cocoa) printf "%s" -Dcocoa=disabled ;;
--enable-coreaudio) printf "%s" -Dcoreaudio=enabled ;; --enable-coreaudio) printf "%s" -Dcoreaudio=enabled ;;
--disable-coreaudio) printf "%s" -Dcoreaudio=disabled ;; --disable-coreaudio) printf "%s" -Dcoreaudio=disabled ;;
--with-coroutine=*) quote_sh "-Dcoroutine_backend=$2" ;;
--enable-coroutine-pool) printf "%s" -Dcoroutine_pool=true ;; --enable-coroutine-pool) printf "%s" -Dcoroutine_pool=true ;;
--disable-coroutine-pool) printf "%s" -Dcoroutine_pool=false ;; --disable-coroutine-pool) printf "%s" -Dcoroutine_pool=false ;;
--enable-crypto-afalg) printf "%s" -Dcrypto_afalg=enabled ;; --enable-crypto-afalg) printf "%s" -Dcrypto_afalg=enabled ;;
@ -258,6 +266,8 @@ _meson_option_parse() {
--datadir=*) quote_sh "-Ddatadir=$2" ;; --datadir=*) quote_sh "-Ddatadir=$2" ;;
--enable-dbus-display) printf "%s" -Ddbus_display=enabled ;; --enable-dbus-display) printf "%s" -Ddbus_display=enabled ;;
--disable-dbus-display) printf "%s" -Ddbus_display=disabled ;; --disable-dbus-display) printf "%s" -Ddbus_display=disabled ;;
--enable-debug-info) printf "%s" -Ddebug=true ;;
--disable-debug-info) printf "%s" -Ddebug=false ;;
--enable-debug-graph-lock) printf "%s" -Ddebug_graph_lock=true ;; --enable-debug-graph-lock) printf "%s" -Ddebug_graph_lock=true ;;
--disable-debug-graph-lock) printf "%s" -Ddebug_graph_lock=false ;; --disable-debug-graph-lock) printf "%s" -Ddebug_graph_lock=false ;;
--enable-debug-mutex) printf "%s" -Ddebug_mutex=true ;; --enable-debug-mutex) printf "%s" -Ddebug_mutex=true ;;
@ -361,6 +371,8 @@ _meson_option_parse() {
--disable-membarrier) printf "%s" -Dmembarrier=disabled ;; --disable-membarrier) printf "%s" -Dmembarrier=disabled ;;
--enable-module-upgrades) printf "%s" -Dmodule_upgrades=true ;; --enable-module-upgrades) printf "%s" -Dmodule_upgrades=true ;;
--disable-module-upgrades) printf "%s" -Dmodule_upgrades=false ;; --disable-module-upgrades) printf "%s" -Dmodule_upgrades=false ;;
--enable-modules) printf "%s" -Dmodules=enabled ;;
--disable-modules) printf "%s" -Dmodules=disabled ;;
--enable-mpath) printf "%s" -Dmpath=enabled ;; --enable-mpath) printf "%s" -Dmpath=enabled ;;
--disable-mpath) printf "%s" -Dmpath=disabled ;; --disable-mpath) printf "%s" -Dmpath=disabled ;;
--enable-multiprocess) printf "%s" -Dmultiprocess=enabled ;; --enable-multiprocess) printf "%s" -Dmultiprocess=enabled ;;
@ -407,6 +419,10 @@ _meson_option_parse() {
--disable-replication) printf "%s" -Dreplication=disabled ;; --disable-replication) printf "%s" -Dreplication=disabled ;;
--enable-rng-none) printf "%s" -Drng_none=true ;; --enable-rng-none) printf "%s" -Drng_none=true ;;
--disable-rng-none) printf "%s" -Drng_none=false ;; --disable-rng-none) printf "%s" -Drng_none=false ;;
--enable-safe-stack) printf "%s" -Dsafe_stack=true ;;
--disable-safe-stack) printf "%s" -Dsafe_stack=false ;;
--enable-sanitizers) printf "%s" -Dsanitizers=true ;;
--disable-sanitizers) printf "%s" -Dsanitizers=false ;;
--enable-sdl) printf "%s" -Dsdl=enabled ;; --enable-sdl) printf "%s" -Dsdl=enabled ;;
--disable-sdl) printf "%s" -Dsdl=disabled ;; --disable-sdl) printf "%s" -Dsdl=disabled ;;
--enable-sdl-image) printf "%s" -Dsdl_image=enabled ;; --enable-sdl-image) printf "%s" -Dsdl_image=enabled ;;
@ -427,11 +443,12 @@ _meson_option_parse() {
--disable-sndio) printf "%s" -Dsndio=disabled ;; --disable-sndio) printf "%s" -Dsndio=disabled ;;
--enable-sparse) printf "%s" -Dsparse=enabled ;; --enable-sparse) printf "%s" -Dsparse=enabled ;;
--disable-sparse) printf "%s" -Dsparse=disabled ;; --disable-sparse) printf "%s" -Dsparse=disabled ;;
--sphinx-build=*) quote_sh "-Dsphinx_build=$2" ;;
--enable-spice) printf "%s" -Dspice=enabled ;; --enable-spice) printf "%s" -Dspice=enabled ;;
--disable-spice) printf "%s" -Dspice=disabled ;; --disable-spice) printf "%s" -Dspice=disabled ;;
--enable-spice-protocol) printf "%s" -Dspice_protocol=enabled ;; --enable-spice-protocol) printf "%s" -Dspice_protocol=enabled ;;
--disable-spice-protocol) printf "%s" -Dspice_protocol=disabled ;; --disable-spice-protocol) printf "%s" -Dspice_protocol=disabled ;;
--enable-stack-protector) printf "%s" -Dstack_protector=enabled ;;
--disable-stack-protector) printf "%s" -Dstack_protector=disabled ;;
--enable-strip) printf "%s" -Dstrip=true ;; --enable-strip) printf "%s" -Dstrip=true ;;
--disable-strip) printf "%s" -Dstrip=false ;; --disable-strip) printf "%s" -Dstrip=false ;;
--sysconfdir=*) quote_sh "-Dsysconfdir=$2" ;; --sysconfdir=*) quote_sh "-Dsysconfdir=$2" ;;
@ -446,6 +463,8 @@ _meson_option_parse() {
--disable-tpm) printf "%s" -Dtpm=disabled ;; --disable-tpm) printf "%s" -Dtpm=disabled ;;
--enable-trace-backends=*) quote_sh "-Dtrace_backends=$2" ;; --enable-trace-backends=*) quote_sh "-Dtrace_backends=$2" ;;
--with-trace-file=*) quote_sh "-Dtrace_file=$2" ;; --with-trace-file=*) quote_sh "-Dtrace_file=$2" ;;
--enable-tsan) printf "%s" -Dtsan=true ;;
--disable-tsan) printf "%s" -Dtsan=false ;;
--enable-u2f) printf "%s" -Du2f=enabled ;; --enable-u2f) printf "%s" -Du2f=enabled ;;
--disable-u2f) printf "%s" -Du2f=disabled ;; --disable-u2f) printf "%s" -Du2f=disabled ;;
--enable-usb-redir) printf "%s" -Dusb_redir=enabled ;; --enable-usb-redir) printf "%s" -Dusb_redir=enabled ;;


@ -1,7 +1,7 @@
[mypy] [mypy]
strict = True strict = True
disallow_untyped_calls = False disallow_untyped_calls = False
python_version = 3.6 python_version = 3.7
[mypy-qapi.schema] [mypy-qapi.schema]
disallow_untyped_defs = False disallow_untyped_defs = False


@ -2253,7 +2253,7 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
* If memory region `mr' is NULL, do global sync. Otherwise, sync * If memory region `mr' is NULL, do global sync. Otherwise, sync
* dirty bitmap for the specified memory region. * dirty bitmap for the specified memory region.
*/ */
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage)
{ {
MemoryListener *listener; MemoryListener *listener;
AddressSpace *as; AddressSpace *as;
@ -2283,7 +2283,7 @@ static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
* is to do a global sync, because we are not capable to * is to do a global sync, because we are not capable to
* sync in a finer granularity. * sync in a finer granularity.
*/ */
listener->log_sync_global(listener); listener->log_sync_global(listener, last_stage);
trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1); trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
} }
} }
@ -2347,7 +2347,7 @@ DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
{ {
DirtyBitmapSnapshot *snapshot; DirtyBitmapSnapshot *snapshot;
assert(mr->ram_block); assert(mr->ram_block);
memory_region_sync_dirty_bitmap(mr); memory_region_sync_dirty_bitmap(mr, false);
snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client); snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
memory_global_after_dirty_log_sync(); memory_global_after_dirty_log_sync();
return snapshot; return snapshot;
@ -2873,9 +2873,9 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr)
return mr && mr != container; return mr && mr != container;
} }
void memory_global_dirty_log_sync(void) void memory_global_dirty_log_sync(bool last_stage)
{ {
memory_region_sync_dirty_bitmap(NULL); memory_region_sync_dirty_bitmap(NULL, last_stage);
} }
void memory_global_after_dirty_log_sync(void) void memory_global_after_dirty_log_sync(void)


@ -863,7 +863,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"tsx-ldtrk", NULL, NULL /* pconfig */, "arch-lbr", "tsx-ldtrk", NULL, NULL /* pconfig */, "arch-lbr",
NULL, NULL, "amx-bf16", "avx512-fp16", NULL, NULL, "amx-bf16", "avx512-fp16",
"amx-tile", "amx-int8", "spec-ctrl", "stibp", "amx-tile", "amx-int8", "spec-ctrl", "stibp",
NULL, "arch-capabilities", "core-capability", "ssbd", "flush-l1d", "arch-capabilities", "core-capability", "ssbd",
}, },
.cpuid = { .cpuid = {
.eax = 7, .eax = 7,
@ -1050,7 +1050,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
"taa-no", NULL, NULL, NULL, "taa-no", NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, "fb-clear", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,


@ -899,6 +899,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
/* Single Thread Indirect Branch Predictors */ /* Single Thread Indirect Branch Predictors */
#define CPUID_7_0_EDX_STIBP (1U << 27) #define CPUID_7_0_EDX_STIBP (1U << 27)
/* Flush L1D cache */
#define CPUID_7_0_EDX_FLUSH_L1D (1U << 28)
/* Arch Capabilities */ /* Arch Capabilities */
#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29)
/* Core Capability */ /* Core Capability */
@ -1016,6 +1018,7 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
#define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6) #define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6)
#define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7) #define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7)
#define MSR_ARCH_CAP_TAA_NO (1U << 8) #define MSR_ARCH_CAP_TAA_NO (1U << 8)
#define MSR_ARCH_CAP_FB_CLEAR (1U << 17)
#define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)


@ -2497,6 +2497,14 @@ void helper_vpermdq_ymm(Reg *d, Reg *v, Reg *s, uint32_t order)
d->Q(1) = r1; d->Q(1) = r1;
d->Q(2) = r2; d->Q(2) = r2;
d->Q(3) = r3; d->Q(3) = r3;
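/* VPERM2x128: imm8 bit 3 zeroes the low 128-bit half of the destination, bit 7 the high half. */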
if (order & 0x8) {
d->Q(0) = 0;
d->Q(1) = 0;
}
if (order & 0x80) {
d->Q(2) = 0;
d->Q(3) = 0;
}
} }
void helper_vpermq_ymm(Reg *d, Reg *s, uint32_t order) void helper_vpermq_ymm(Reg *d, Reg *s, uint32_t order)


@ -237,7 +237,7 @@ static void decode_group14(DisasContext *s, CPUX86State *env, X86OpEntry *entry,
static void decode_0F6F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) static void decode_0F6F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{ {
static const X86OpEntry opcodes_0F6F[4] = { static const X86OpEntry opcodes_0F6F[4] = {
X86_OP_ENTRY3(MOVDQ, P,q, None,None, Q,q, vex1 mmx), /* movq */ X86_OP_ENTRY3(MOVDQ, P,q, None,None, Q,q, vex5 mmx), /* movq */
X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex1), /* movdqa */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex1), /* movdqa */
X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* movdqu */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* movdqu */
{}, {},
@ -274,9 +274,9 @@ static void decode_0F78(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
{ {
static const X86OpEntry opcodes_0F78[4] = { static const X86OpEntry opcodes_0F78[4] = {
{}, {},
X86_OP_ENTRY3(EXTRQ_i, V,x, None,None, I,w, cpuid(SSE4A)), X86_OP_ENTRY3(EXTRQ_i, V,x, None,None, I,w, cpuid(SSE4A)), /* AMD extension */
{}, {},
X86_OP_ENTRY3(INSERTQ_i, V,x, U,x, I,w, cpuid(SSE4A)), X86_OP_ENTRY3(INSERTQ_i, V,x, U,x, I,w, cpuid(SSE4A)), /* AMD extension */
}; };
*entry = *decode_by_prefix(s, opcodes_0F78); *entry = *decode_by_prefix(s, opcodes_0F78);
} }
@ -284,9 +284,9 @@ static void decode_0F78(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
static void decode_0F79(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) static void decode_0F79(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{ {
if (s->prefix & PREFIX_REPNZ) { if (s->prefix & PREFIX_REPNZ) {
entry->gen = gen_INSERTQ_r; entry->gen = gen_INSERTQ_r; /* AMD extension */
} else if (s->prefix & PREFIX_DATA) { } else if (s->prefix & PREFIX_DATA) {
entry->gen = gen_EXTRQ_r; entry->gen = gen_EXTRQ_r; /* AMD extension */
} else { } else {
entry->gen = NULL; entry->gen = NULL;
}; };
@ -306,7 +306,7 @@ static void decode_0F7E(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
static void decode_0F7F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) static void decode_0F7F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{ {
static const X86OpEntry opcodes_0F7F[4] = { static const X86OpEntry opcodes_0F7F[4] = {
X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex1 mmx), /* movq */ X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex5 mmx), /* movq */
X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex1), /* movdqa */ X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex1), /* movdqa */
X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4_unal), /* movdqu */ X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4_unal), /* movdqu */
{}, {},
@ -639,15 +639,15 @@ static void decode_0F10(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
static const X86OpEntry opcodes_0F10_reg[4] = { static const X86OpEntry opcodes_0F10_reg[4] = {
X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPS */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPS */
X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPD */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPD */
X86_OP_ENTRY3(VMOVSS, V,x, H,x, W,x, vex4), X86_OP_ENTRY3(VMOVSS, V,x, H,x, W,x, vex5),
X86_OP_ENTRY3(VMOVLPx, V,x, H,x, W,x, vex4), /* MOVSD */ X86_OP_ENTRY3(VMOVLPx, V,x, H,x, W,x, vex5), /* MOVSD */
}; };
static const X86OpEntry opcodes_0F10_mem[4] = { static const X86OpEntry opcodes_0F10_mem[4] = {
X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPS */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPS */
X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPD */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPD */
X86_OP_ENTRY3(VMOVSS_ld, V,x, H,x, M,ss, vex4), X86_OP_ENTRY3(VMOVSS_ld, V,x, H,x, M,ss, vex5),
X86_OP_ENTRY3(VMOVSD_ld, V,x, H,x, M,sd, vex4), X86_OP_ENTRY3(VMOVSD_ld, V,x, H,x, M,sd, vex5),
}; };
if ((get_modrm(s, env) >> 6) == 3) { if ((get_modrm(s, env) >> 6) == 3) {
@@ -660,17 +660,17 @@ static void decode_0F10(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
 static void decode_0F11(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
 {
     static const X86OpEntry opcodes_0F11_reg[4] = {
-        X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPS */
-        X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPD */
-        X86_OP_ENTRY3(VMOVSS, W,x, H,x, V,x, vex4),
-        X86_OP_ENTRY3(VMOVLPx, W,x, H,x, V,q, vex4), /* MOVSD */
+        X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVUPS */
+        X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVUPD */
+        X86_OP_ENTRY3(VMOVSS, W,x, H,x, V,x, vex5),
+        X86_OP_ENTRY3(VMOVLPx, W,x, H,x, V,q, vex5), /* MOVSD */
     };

     static const X86OpEntry opcodes_0F11_mem[4] = {
-        X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPS */
-        X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPD */
-        X86_OP_ENTRY3(VMOVSS_st, M,ss, None,None, V,x, vex4),
-        X86_OP_ENTRY3(VMOVLPx_st, M,sd, None,None, V,x, vex4), /* MOVSD */
+        X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVUPS */
+        X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVUPD */
+        X86_OP_ENTRY3(VMOVSS_st, M,ss, None,None, V,x, vex5),
+        X86_OP_ENTRY3(VMOVLPx_st, M,sd, None,None, V,x, vex5), /* MOVSD */
     };

     if ((get_modrm(s, env) >> 6) == 3) {
@@ -687,16 +687,16 @@ static void decode_0F12(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
          * Use dq for operand for compatibility with gen_MOVSD and
          * to allow VEX128 only.
          */
-        X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex4), /* MOVLPS */
-        X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex4), /* MOVLPD */
+        X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex5), /* MOVLPS */
+        X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex5), /* MOVLPD */
         X86_OP_ENTRY3(VMOVSLDUP, V,x, None,None, W,x, vex4 cpuid(SSE3)),
-        X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, WM,q, vex4 cpuid(SSE3)), /* qq if VEX.256 */
+        X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, WM,q, vex5 cpuid(SSE3)), /* qq if VEX.256 */
     };

     static const X86OpEntry opcodes_0F12_reg[4] = {
-        X86_OP_ENTRY3(VMOVHLPS, V,dq, H,dq, U,dq, vex4),
-        X86_OP_ENTRY3(VMOVLPx, W,x, H,x, U,q, vex4), /* MOVLPD */
+        X86_OP_ENTRY3(VMOVHLPS, V,dq, H,dq, U,dq, vex7),
+        X86_OP_ENTRY3(VMOVLPx, W,x, H,x, U,q, vex5), /* MOVLPD */
         X86_OP_ENTRY3(VMOVSLDUP, V,x, None,None, U,x, vex4 cpuid(SSE3)),
-        X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, U,x, vex4 cpuid(SSE3)),
+        X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, U,x, vex5 cpuid(SSE3)),
     };
     if ((get_modrm(s, env) >> 6) == 3) {
@@ -716,15 +716,15 @@ static void decode_0F16(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
          * Operand 1 technically only reads the low 64 bits, but uses dq so that
          * it is easier to check for op0 == op1 in an endianness-neutral manner.
          */
-        X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex4), /* MOVHPS */
-        X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex4), /* MOVHPD */
+        X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex5), /* MOVHPS */
+        X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex5), /* MOVHPD */
         X86_OP_ENTRY3(VMOVSHDUP, V,x, None,None, W,x, vex4 cpuid(SSE3)),
         {},
     };
     static const X86OpEntry opcodes_0F16_reg[4] = {
         /* Same as above, operand 1 could be Hq if it wasn't for big-endian. */
-        X86_OP_ENTRY3(VMOVLHPS, V,dq, H,dq, U,q, vex4),
-        X86_OP_ENTRY3(VMOVHPx, V,x, H,x, U,x, vex4), /* MOVHPD */
+        X86_OP_ENTRY3(VMOVLHPS, V,dq, H,dq, U,q, vex7),
+        X86_OP_ENTRY3(VMOVHPx, V,x, H,x, U,x, vex5), /* MOVHPD */
         X86_OP_ENTRY3(VMOVSHDUP, V,x, None,None, U,x, vex4 cpuid(SSE3)),
         {},
     };
@@ -750,8 +750,9 @@ static void decode_0F2A(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
 static void decode_0F2B(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
 {
     static const X86OpEntry opcodes_0F2B[4] = {
-        X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex4), /* MOVNTPS */
-        X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex4), /* MOVNTPD */
+        X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex1), /* MOVNTPS */
+        X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex1), /* MOVNTPD */
+        /* AMD extensions */
         X86_OP_ENTRY3(VMOVSS_st, M,ss, None,None, V,x, vex4 cpuid(SSE4A)), /* MOVNTSS */
         X86_OP_ENTRY3(VMOVLPx_st, M,sd, None,None, V,x, vex4 cpuid(SSE4A)), /* MOVNTSD */
     };
@@ -783,6 +784,17 @@ static void decode_0F2D(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
     *entry = *decode_by_prefix(s, opcodes_0F2D);
 }

+static void decode_VxCOMISx(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
+{
+    /*
+     * VUCOMISx and VCOMISx are different and use no-prefix and 0x66 for SS and SD
+     * respectively. Scalar values usually are associated with 0xF2 and 0xF3, for
+     * which X86_VEX_REPScalar exists, but here it has to be decoded by hand.
+     */
+    entry->s1 = entry->s2 = (s->prefix & PREFIX_DATA ? X86_SIZE_sd : X86_SIZE_ss);
+    entry->gen = (*b == 0x2E ? gen_VUCOMI : gen_VCOMI);
+}
+
 static void decode_sse_unary(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
 {
     if (!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))) {
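
The new decode_VxCOMISx() picks the scalar size by hand because, as its comment notes, (V)UCOMISx and (V)COMISx use no prefix versus 0x66 to choose single versus double precision, not the usual 0xF3/0xF2 pair. A minimal standalone sketch of that selection, with hypothetical names rather than QEMU's:

    #include <stdbool.h>

    typedef enum { OP_SS, OP_SD } scalar_size;

    /* No prefix selects scalar single, the 0x66 data-size prefix selects
     * scalar double; the repeat prefixes play no role for these opcodes. */
    static scalar_size comis_scalar_size(bool has_66_prefix)
    {
        return has_66_prefix ? OP_SD : OP_SS;
    }
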
@@ -813,7 +825,7 @@ static void decode_0FE6(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
     static const X86OpEntry opcodes_0FE6[4] = {
         {},
         X86_OP_ENTRY2(VCVTTPD2DQ, V,x, W,x, vex2),
-        X86_OP_ENTRY2(VCVTDQ2PD, V,x, W,x, vex2),
+        X86_OP_ENTRY2(VCVTDQ2PD, V,x, W,x, vex5),
         X86_OP_ENTRY2(VCVTPD2DQ, V,x, W,x, vex2),
     };
     *entry = *decode_by_prefix(s, opcodes_0FE6);
@@ -831,17 +843,17 @@ static const X86OpEntry opcodes_0F[256] = {
     [0x10] = X86_OP_GROUP0(0F10),
     [0x11] = X86_OP_GROUP0(0F11),
     [0x12] = X86_OP_GROUP0(0F12),
-    [0x13] = X86_OP_ENTRY3(VMOVLPx_st, M,q, None,None, V,q, vex4 p_00_66),
+    [0x13] = X86_OP_ENTRY3(VMOVLPx_st, M,q, None,None, V,q, vex5 p_00_66),
     [0x14] = X86_OP_ENTRY3(VUNPCKLPx, V,x, H,x, W,x, vex4 p_00_66),
     [0x15] = X86_OP_ENTRY3(VUNPCKHPx, V,x, H,x, W,x, vex4 p_00_66),
     [0x16] = X86_OP_GROUP0(0F16),
     /* Incorrectly listed as Mq,Vq in the manual */
-    [0x17] = X86_OP_ENTRY3(VMOVHPx_st, M,q, None,None, V,dq, vex4 p_00_66),
+    [0x17] = X86_OP_ENTRY3(VMOVHPx_st, M,q, None,None, V,dq, vex5 p_00_66),

     [0x50] = X86_OP_ENTRY3(MOVMSK, G,y, None,None, U,x, vex7 p_00_66),
-    [0x51] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2),
-    [0x52] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex4_rep5 p_00_f3),
-    [0x53] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex4_rep5 p_00_f3),
+    [0x51] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), /* sqrtps */
+    [0x52] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex4_rep5 p_00_f3), /* rsqrtps */
+    [0x53] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex4_rep5 p_00_f3), /* rcpps */
     [0x54] = X86_OP_ENTRY3(PAND, V,x, H,x, W,x, vex4 p_00_66), /* vand */
     [0x55] = X86_OP_ENTRY3(PANDN, V,x, H,x, W,x, vex4 p_00_66), /* vandn */
     [0x56] = X86_OP_ENTRY3(POR, V,x, H,x, W,x, vex4 p_00_66), /* vor */
@@ -871,15 +883,15 @@ static const X86OpEntry opcodes_0F[256] = {
     [0x2B] = X86_OP_GROUP0(0F2B),
     [0x2C] = X86_OP_GROUP0(0F2C),
     [0x2D] = X86_OP_GROUP0(0F2D),
-    [0x2E] = X86_OP_ENTRY3(VUCOMI, None,None, V,x, W,x, vex4 p_00_66),
-    [0x2F] = X86_OP_ENTRY3(VCOMI, None,None, V,x, W,x, vex4 p_00_66),
+    [0x2E] = X86_OP_GROUP3(VxCOMISx, None,None, V,x, W,x, vex3 p_00_66), /* VUCOMISS/SD */
+    [0x2F] = X86_OP_GROUP3(VxCOMISx, None,None, V,x, W,x, vex3 p_00_66), /* VCOMISS/SD */

     [0x38] = X86_OP_GROUP0(0F38),
     [0x3a] = X86_OP_GROUP0(0F3A),

     [0x58] = X86_OP_ENTRY3(VADD, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2),
     [0x59] = X86_OP_ENTRY3(VMUL, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2),
-    [0x5a] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2),
+    [0x5a] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), /* CVTPS2PD */
     [0x5b] = X86_OP_GROUP0(0F5B),
     [0x5c] = X86_OP_ENTRY3(VSUB, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2),
     [0x5d] = X86_OP_ENTRY3(VMIN, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2),

@@ -2285,7 +2285,7 @@ static void gen_VZEROALL(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco
 {
     TCGv_ptr ptr = tcg_temp_new_ptr();

-    tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_t0));
+    tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_regs));
     gen_helper_memset(ptr, ptr, tcg_constant_i32(0),
                       tcg_constant_ptr(CPU_NB_REGS * sizeof(ZMMReg)));
 }
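
The fix above makes gen_VZEROALL clear the architectural register file (xmm_regs) instead of the xmm_t0 scratch slot. A standalone sketch of the intended effect, using made-up types in place of CPUX86State and ZMMReg:

    #include <stddef.h>
    #include <string.h>

    #define NB_REGS 16

    typedef struct { unsigned char bytes[64]; } Reg;

    typedef struct {
        Reg scratch;        /* stands in for xmm_t0 */
        Reg regs[NB_REGS];  /* stands in for xmm_regs */
    } CPUStateLike;

    /* Zero every register: the memset base must be the register array
     * itself, not an unrelated scratch field. */
    static void zero_all_regs(CPUStateLike *cpu)
    {
        memset((char *)cpu + offsetof(CPUStateLike, regs), 0,
               NB_REGS * sizeof(Reg));
    }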

@@ -13,7 +13,7 @@ tcg_ss.add(files(
 if get_option('tcg_interpreter')
   libffi = dependency('libffi', version: '>=3.0', required: true,
-                      method: 'pkg-config', kwargs: static_kwargs)
+                      method: 'pkg-config')
   specific_ss.add(libffi)
   specific_ss.add(files('tci.c'))
 endif

@@ -89,7 +89,8 @@ distclean-tcg: $(DISTCLEAN_TCG_TARGET_RULES)
 # Build up our target list from the filtered list of ninja targets
 TARGETS=$(patsubst libqemu-%.fa, %, $(filter libqemu-%.fa, $(ninja-targets)))

-TESTS_VENV_DIR=$(BUILD_DIR)/tests/venv
+TESTS_VENV_DIR=$(BUILD_DIR)/pyvenv
+TESTS_VENV_TOKEN=$(BUILD_DIR)/pyvenv/tests.group
 TESTS_VENV_REQ=$(SRC_PATH)/tests/requirements.txt
 TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results
 TESTS_PYTHON=$(TESTS_VENV_DIR)/bin/python3
@@ -111,8 +112,7 @@ quiet-venv-pip = $(quiet-@)$(call quiet-command-run, \
     $(TESTS_PYTHON) -m pip -q --disable-pip-version-check $1, \
     "VENVPIP","$1")

-$(TESTS_VENV_DIR): $(TESTS_VENV_REQ)
-	$(call quiet-command, $(PYTHON) -m venv $@, VENV, $@)
+$(TESTS_VENV_TOKEN): $(TESTS_VENV_REQ)
 	$(call quiet-venv-pip,install -e "$(SRC_PATH)/python/")
 	$(call quiet-venv-pip,install -r $(TESTS_VENV_REQ))
 	$(call quiet-command, touch $@)
@@ -121,7 +121,7 @@ $(TESTS_RESULTS_DIR):
 	$(call quiet-command, mkdir -p $@, \
 	    MKDIR, $@)

-check-venv: $(TESTS_VENV_DIR)
+check-venv: $(TESTS_VENV_TOKEN)

 FEDORA_31_ARCHES_TARGETS=$(patsubst %-softmmu,%, $(filter %-softmmu,$(TARGETS)))
 FEDORA_31_ARCHES_CANDIDATES=$(patsubst ppc64,ppc64le,$(FEDORA_31_ARCHES_TARGETS))
@@ -167,7 +167,7 @@ check:
 check-build: run-ninja

 check-clean:
-	rm -rf $(TESTS_VENV_DIR) $(TESTS_RESULTS_DIR)
+	rm -rf $(TESTS_RESULTS_DIR)

 clean: check-clean clean-tcg
 distclean: distclean-tcg

@@ -57,7 +57,8 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \
     gcc-sh4-linux-gnu \
     libc6-dev-sh4-cross \
     gcc-sparc64-linux-gnu \
-    libc6-dev-sparc64-cross
+    libc6-dev-sparc64-cross \
+    python3-venv

 ENV QEMU_CONFIGURE_OPTS --disable-system --disable-docs --disable-tools
 ENV DEF_TARGET_LIST aarch64-linux-user,alpha-linux-user,arm-linux-user,hppa-linux-user,i386-linux-user,m68k-linux-user,mips-linux-user,mips64-linux-user,mips64el-linux-user,mipsel-linux-user,ppc-linux-user,ppc64-linux-user,ppc64le-linux-user,riscv64-linux-user,s390x-linux-user,sh4-linux-user,sparc64-linux-user

@@ -20,7 +20,8 @@ RUN apt-get update && \
         bison \
         flex \
         git \
-        ninja-build && \
+        ninja-build \
+        python3-venv && \
     # Install QEMU build deps for use in CI
     DEBIAN_FRONTEND=noninteractive eatmydata \
     apt build-dep -yy --arch-only qemu

@@ -28,7 +28,8 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy \
     libglib2.0-dev \
     ninja-build \
     pkg-config \
-    python3
+    python3 \
+    python3-venv

 # Add ports and riscv64 architecture
 RUN echo "deb http://ftp.ports.debian.org/debian-ports/ sid main" >> /etc/apt/sources.list

@@ -33,7 +33,8 @@ RUN apt update && \
     pkgconf \
     python3-pip \
     python3-setuptools \
-    python3-wheel
+    python3-wheel \
+    python3-venv

 RUN curl -#SL https://github.com/bkoppelmann/package_940/releases/download/tricore-toolchain-9.40/tricore-toolchain-9.4.0.tar.gz \
     | tar -xzC /usr/local/

@@ -2,7 +2,7 @@ if not have_tools or targetos == 'windows' or get_option('gprof')
   subdir_done()
 endif

-foreach cflag: config_host['QEMU_CFLAGS'].split()
+foreach cflag: qemu_ldflags
   if cflag.startswith('-fsanitize') and \
      not cflag.contains('safe-stack') and not cflag.contains('cfi-icall')
     message('Sanitizers are enabled ==> Disabled the qemu-iotests.')

@@ -23,7 +23,7 @@ qtests_generic = [
   'readconfig-test',
   'netdev-socket',
 ]

-if config_host.has_key('CONFIG_MODULES')
+if enable_modules
   qtests_generic += [ 'modules-test' ]
 endif

@@ -1,6 +1,9 @@
 # Add Python module requirements, one per line, to be installed
-# in the tests/venv Python virtual environment. For more info,
+# in the qemu build_dir/pyvenv Python virtual environment. For more info,
 # refer to: https://pip.pypa.io/en/stable/user_guide/#id1
-# Note that qemu.git/python/ is always implicitly installed.
+#
+# Note that qemu.git/python/ is implicitly installed to this venv when
+# 'make check-venv' is run, and will persist until configure is run
+# again.
 avocado-framework==101.0
 pycdlib==1.11.0

@@ -49,7 +49,7 @@ imask = {
     'VEXTRACT[FI]128': 0x01,
     'VINSERT[FI]128': 0x01,
     'VPBLENDD': 0xff,
-    'VPERM2[FI]128': 0x33,
+    'VPERM2[FI]128': 0xbb,
     'VPERMPD': 0xff,
     'VPERMQ': 0xff,
     'VPERMILPS': 0xff,
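
Widening the VPERM2[FI]128 immediate mask from 0x33 to 0xbb lets the test also exercise the zeroing bits: bits 1:0 and 5:4 of the immediate select the source for the low and high 128-bit result lane, while bits 3 and 7 zero the corresponding lane. A rough C model of that immediate layout (hypothetical helper, not part of the test):

    #include <stdint.h>

    typedef struct { uint64_t q[2]; } Lane128;   /* one 128-bit lane */
    typedef struct { Lane128 lane[2]; } Vec256;  /* low lane, high lane */

    /* VPERM2x128-style selection: imm bits 1:0/5:4 pick one of the four
     * source lanes, bits 3/7 force the result lane to zero.  Mask 0xbb
     * covers exactly these bits; 0x33 never set the zeroing bits. */
    static Vec256 perm2x128(Vec256 a, Vec256 b, uint8_t imm)
    {
        const Lane128 src[4] = { a.lane[0], a.lane[1], b.lane[0], b.lane[1] };
        Vec256 r;

        r.lane[0] = (imm & 0x08) ? (Lane128){{0, 0}} : src[imm & 3];
        r.lane[1] = (imm & 0x80) ? (Lane128){{0, 0}} : src[(imm >> 4) & 3];
        return r;
    }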

@@ -147,7 +147,7 @@ if have_system
   # Some tests: test-char, test-qdev-global-props, and test-qga,
   # are not runnable under TSan due to a known issue.
   # https://github.com/google/sanitizers/issues/1116
-  if 'CONFIG_TSAN' not in config_host
+  if not get_option('tsan')
     if 'CONFIG_POSIX' in config_host
       tests += {
         'test-char': ['socket-helpers.c', qom, io, chardev]

@@ -30,6 +30,8 @@ class NetBSDVM(basevm.BaseVM):
         "git-base",
         "pkgconf",
         "xz",
+        "python310",
+        "py310-expat",
         "ninja-build",

         # gnu tools

@@ -26,7 +26,9 @@ util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: winmm)
 util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch)
-util_ss.add(when: 'HAVE_GLIB_WITH_SLICE_ALLOCATOR', if_true: files('qtree.c'))
+if glib_has_gslice
+  util_ss.add(files('qtree.c'))
+endif
 util_ss.add(files('envlist.c', 'path.c', 'module.c'))
 util_ss.add(files('host-utils.c'))
 util_ss.add(files('bitmap.c', 'bitops.c'))
@@ -76,7 +78,7 @@ if have_block or have_ga
   util_ss.add(files('base64.c'))
   util_ss.add(files('main-loop.c'))
   util_ss.add(files('qemu-coroutine.c', 'qemu-coroutine-lock.c', 'qemu-coroutine-io.c'))
-  util_ss.add(files('coroutine-@0@.c'.format(config_host['CONFIG_COROUTINE_BACKEND'])))
+  util_ss.add(files(f'coroutine-@coroutine_backend@.c'))
   util_ss.add(files('thread-pool.c', 'qemu-timer.c'))
   util_ss.add(files('qemu-sockets.c'))
 endif