Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging

* qom: Use command line syntax for default values in help
* i386: support cache topology with machine's configuration
* rust: fix duplicate symbols from monitor-fd.c
* rust: add module to convert between success/-errno and io::Result
* rust: move class_init implementation from trait to method
* pvg: configuration improvements
* kvm guestmemfd: replace assertion with error
* riscv: cleanups
* target/i386/hvf: cleanups to emulation
* target/i386: add Zhaoxin and Yongfeng CPU model

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAme+10sUHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroMkRwf/eT0gVbE3u0TS6EVZwjGZPHEOEyy/
# gl39SlTT97HxoAClE4PRcdkn7YR3f30hytHghc4qhou+Eh/7Mj2Ox7l7+CyaaCS/
# fxowsOVMBV7++PkyKRPxIMamKzD8Bo0eGwWe+CJijA0zt9PSI/YEwRV0pf/s6KCW
# pOya2f+aNbAo3O5RWtIKSISgbSVvuVzDcDHyfydmOHuvGr2NHAM8UfZYD+41qy5B
# 81PYlvK6HgvhaCboqCUADULkte96Xmc4p2ggk0ZNiy0ho46rs78SMyBh5sXR2S3I
# moiQHpJXyV5TcI7HmwvcW7s0/cpdKm/wmPOjb6otu9InWh/ON1nnURsTEQ==
# =V/fm
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 26 Feb 2025 16:56:43 HKT
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu: (34 commits)
  target/i386: Mask CMPLegacy bit in CPUID[0x80000001].ECX for Zhaoxin CPUs
  target/i386: Introduce Zhaoxin Yongfeng CPU model
  target/i386: Add CPUID leaf 0xC000_0001 EDX definitions
  target/i386: Add support for Zhaoxin CPU vendor identification
  target/riscv: move 128-bit check to TCG realize
  target/riscv: remove unused macro DEFINE_CPU
  i386/cpu: add has_caches flag to check smp_cache configuration
  i386/pc: Support cache topology in -machine for PC machine
  i386/cpu: Update cache topology with machine's configuration
  i386/cpu: Support module level cache topology
  rust: qom: get rid of ClassInitImpl
  rust: pl011, qemu_api tests: do not use ClassInitImpl
  rust: qom: add ObjectImpl::CLASS_INIT
  rust: add SysBusDeviceImpl
  rust: add IsA bounds to QOM implementation traits
  target/i386/hvf: drop some dead code
  target/i386/hvf: move and rename simulate_{rdmsr, wrmsr}
  target/i386/hvf: move and rename {load, store}_regs
  target/i386/hvf: use x86_segment in x86_decode.c
  target/i386/hvf: fix the declaration of hvf_handle_io
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

commit 8d56d0fd2f
53 changed files with 1275 additions and 579 deletions
@@ -61,3 +61,6 @@ config HV_BALLOON_POSSIBLE

config HAVE_RUST
    bool

config MAC_PVG
    bool
@@ -139,16 +139,22 @@ anymore.

Writing Rust code in QEMU
-------------------------

Right now QEMU includes three crates:
QEMU includes four crates:

* ``qemu_api`` for bindings to C code and useful functionality

* ``qemu_api_macros`` defines several procedural macros that are useful when
  writing Rust code

* ``pl011`` (under ``rust/hw/char/pl011``) is the sample device that is being
  used to further develop ``qemu_api`` and ``qemu_api_macros``. It is a functional
  replacement for the ``hw/char/pl011.c`` file.
* ``pl011`` (under ``rust/hw/char/pl011``) and ``hpet`` (under ``rust/hw/timer/hpet``)
  are sample devices that demonstrate ``qemu_api`` and ``qemu_api_macros``, and are
  used to further develop them. These two crates are functional\ [#issues]_ replacements
  for the ``hw/char/pl011.c`` and ``hw/timer/hpet.c`` files.

.. [#issues] The ``pl011`` crate is synchronized with ``hw/char/pl011.c``
   as of commit 02b1f7f61928. The ``hpet`` crate is synchronized as of
   commit f32352ff9e. Both are lacking tracing functionality; ``hpet``
   is also lacking support for migration.

This section explains how to work with them.
@@ -179,6 +185,7 @@ module status
``callbacks``    complete
``cell``         stable
``c_str``        complete
``errno``        complete
``irq``          complete
``memory``       stable
``module``       complete
@@ -293,7 +300,7 @@ to a Rust mutable reference, and use a shared reference instead. Rust code
will then have to use QEMU's ``BqlRefCell`` and ``BqlCell`` type, which
enforce that locking rules for the "Big QEMU Lock" are respected. These cell
types are also known to the ``vmstate`` crate, which is able to "look inside"
them when building an in-memory representation of a ``struct``s layout.
them when building an in-memory representation of a ``struct``'s layout.
Note that the same is not true of a ``RefCell`` or ``Mutex``.

In the future, similar cell types might also be provided for ``AioContext``-based
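For illustration only (not part of this patch), a device written against ``qemu_api`` might wrap its interior-mutable state in these cell types roughly as follows; the struct and field names are invented for the example::

    use qemu_api::cell::{BqlCell, BqlRefCell};

    struct ExampleRegs {
        ctrl: u32,
        status: u32,
    }

    struct ExampleDevice {
        counter: BqlCell<u64>,         // Cell-like access for a plain scalar
        regs: BqlRefCell<ExampleRegs>, // RefCell-like access for a larger block
    }

    impl ExampleDevice {
        fn tick(&self) {
            // Both accessors assert that the Big QEMU Lock is held.
            self.counter.set(self.counter.get() + 1);
            self.regs.borrow_mut().status |= 1;
        }
    }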
@@ -349,7 +356,7 @@ Writing procedural macros
'''''''''''''''''''''''''

By convention, procedural macros are split in two functions, one
returning ``Result<proc_macro2::TokenStream, MacroError>` with the body of
returning ``Result<proc_macro2::TokenStream, MacroError>`` with the body of
the procedural macro, and the second returning ``proc_macro::TokenStream``
which is the actual procedural macro. The former's name is the same as
the latter with the ``_or_error`` suffix. The code for the latter is more
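A rough sketch of this convention (not taken from the QEMU tree; ``syn::Error`` stands in for the crate's ``MacroError`` so that the example is self-contained)::

    use proc_macro::TokenStream;
    use quote::quote;
    use syn::{parse_macro_input, DeriveInput};

    // Body of the macro, returning either the generated tokens or an error.
    fn derive_example_or_error(
        input: DeriveInput,
    ) -> Result<proc_macro2::TokenStream, syn::Error> {
        if !matches!(input.data, syn::Data::Struct(_)) {
            return Err(syn::Error::new_spanned(&input.ident, "expected a struct"));
        }
        let name = &input.ident;
        Ok(quote! {
            impl #name {
                pub const IS_EXAMPLE: bool = true;
            }
        })
    }

    // The actual procedural macro: parse, delegate, report errors.
    #[proc_macro_derive(Example)]
    pub fn derive_example(input: TokenStream) -> TokenStream {
        let input = parse_macro_input!(input as DeriveInput);
        derive_example_or_error(input)
            .unwrap_or_else(|e| e.to_compile_error())
            .into()
    }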
@@ -332,6 +332,8 @@ bool machine_parse_smp_cache(MachineState *ms,
            return false;
        }
    }

    mc->smp_props.has_caches = true;
    return true;
}
@@ -141,10 +141,6 @@ config XLNX_DISPLAYPORT
config DM163
    bool

config MAC_PVG
    bool
    default y

config MAC_PVG_MMIO
    bool
    depends on MAC_PVG && AARCH64
@@ -61,13 +61,8 @@ system_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c'))

system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_dbg.c'), pixman])

if host_os == 'darwin'
  system_ss.add(when: 'CONFIG_MAC_PVG', if_true: [files('apple-gfx.m'), pvg, metal])
  system_ss.add(when: 'CONFIG_MAC_PVG_PCI', if_true: [files('apple-gfx-pci.m'), pvg, metal])
  if cpu == 'aarch64'
    system_ss.add(when: 'CONFIG_MAC_PVG_MMIO', if_true: [files('apple-gfx-mmio.m'), pvg, metal])
  endif
endif
system_ss.add(when: [pvg, 'CONFIG_MAC_PVG_PCI'], if_true: [files('apple-gfx.m', 'apple-gfx-pci.m')])
system_ss.add(when: [pvg, 'CONFIG_MAC_PVG_MMIO'], if_true: [files('apple-gfx.m', 'apple-gfx-mmio.m')])

if config_all_devices.has_key('CONFIG_VIRTIO_GPU')
  virtio_gpu_ss = ss.source_set()
@@ -1798,6 +1798,10 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
    mc->nvdimm_supported = true;
    mc->smp_props.dies_supported = true;
    mc->smp_props.modules_supported = true;
    mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L1D] = true;
    mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L1I] = true;
    mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L2] = true;
    mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L3] = true;
    mc->default_ram_id = "pc.ram";
    pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_AUTO;
@ -77,6 +77,7 @@ struct HPETState {
|
|||
uint8_t rtc_irq_level;
|
||||
qemu_irq pit_enabled;
|
||||
uint8_t num_timers;
|
||||
uint8_t num_timers_save;
|
||||
uint32_t intcap;
|
||||
HPETTimer timer[HPET_MAX_TIMERS];
|
||||
|
||||
|
@ -237,15 +238,12 @@ static int hpet_pre_save(void *opaque)
|
|||
s->hpet_counter = hpet_get_ticks(s);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hpet_pre_load(void *opaque)
|
||||
{
|
||||
HPETState *s = opaque;
|
||||
|
||||
/* version 1 only supports 3, later versions will load the actual value */
|
||||
s->num_timers = HPET_MIN_TIMERS;
|
||||
/*
|
||||
* The number of timers must match on source and destination, but it was
|
||||
* also added to the migration stream. Check that it matches the value
|
||||
* that was configured.
|
||||
*/
|
||||
s->num_timers_save = s->num_timers;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -253,12 +251,7 @@ static bool hpet_validate_num_timers(void *opaque, int version_id)
|
|||
{
|
||||
HPETState *s = opaque;
|
||||
|
||||
if (s->num_timers < HPET_MIN_TIMERS) {
|
||||
return false;
|
||||
} else if (s->num_timers > HPET_MAX_TIMERS) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
return s->num_timers == s->num_timers_save;
|
||||
}
|
||||
|
||||
static int hpet_post_load(void *opaque, int version_id)
|
||||
|
@ -277,16 +270,6 @@ static int hpet_post_load(void *opaque, int version_id)
|
|||
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
|
||||
/* Push number of timers into capability returned via HPET_ID */
|
||||
s->capability &= ~HPET_ID_NUM_TIM_MASK;
|
||||
s->capability |= (s->num_timers - 1) << HPET_ID_NUM_TIM_SHIFT;
|
||||
hpet_fw_cfg.hpet[s->hpet_id].event_timer_block_id = (uint32_t)s->capability;
|
||||
|
||||
/* Derive HPET_MSI_SUPPORT from the capability of the first timer. */
|
||||
s->flags &= ~(1 << HPET_MSI_SUPPORT);
|
||||
if (s->timer[0].config & HPET_TN_FSB_CAP) {
|
||||
s->flags |= 1 << HPET_MSI_SUPPORT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -347,14 +330,13 @@ static const VMStateDescription vmstate_hpet = {
|
|||
.version_id = 2,
|
||||
.minimum_version_id = 1,
|
||||
.pre_save = hpet_pre_save,
|
||||
.pre_load = hpet_pre_load,
|
||||
.post_load = hpet_post_load,
|
||||
.fields = (const VMStateField[]) {
|
||||
VMSTATE_UINT64(config, HPETState),
|
||||
VMSTATE_UINT64(isr, HPETState),
|
||||
VMSTATE_UINT64(hpet_counter, HPETState),
|
||||
VMSTATE_UINT8_V(num_timers, HPETState, 2),
|
||||
VMSTATE_VALIDATE("num_timers in range", hpet_validate_num_timers),
|
||||
VMSTATE_UINT8_V(num_timers_save, HPETState, 2),
|
||||
VMSTATE_VALIDATE("num_timers must match", hpet_validate_num_timers),
|
||||
VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers, 0,
|
||||
vmstate_hpet_timer, HPETTimer),
|
||||
VMSTATE_END_OF_LIST()
|
||||
|
|
|
@@ -156,6 +156,8 @@ typedef struct {
 * @modules_supported - whether modules are supported by the machine
 * @cache_supported - whether cache (l1d, l1i, l2 and l3) configuration are
 *                    supported by the machine
 * @has_caches - whether cache properties are explicitly specified in the
 *               user provided smp-cache configuration
 */
typedef struct {
    bool prefer_sockets;
@@ -166,6 +168,7 @@ typedef struct {
    bool drawers_supported;
    bool modules_supported;
    bool cache_supported[CACHE_LEVEL_AND_TYPE__MAX];
    bool has_caches;
} SMPCompatProps;

/**
meson.build (14 changes)
@@ -821,7 +821,6 @@ version_res = []
coref = []
iokit = []
pvg = not_found
metal = []
emulator_link_args = []
midl = not_found
widl = not_found
@@ -843,8 +842,8 @@ elif host_os == 'darwin'
  coref = dependency('appleframeworks', modules: 'CoreFoundation')
  iokit = dependency('appleframeworks', modules: 'IOKit', required: false)
  host_dsosuf = '.dylib'
  pvg = dependency('appleframeworks', modules: 'ParavirtualizedGraphics')
  metal = dependency('appleframeworks', modules: 'Metal')
  pvg = dependency('appleframeworks', modules: ['ParavirtualizedGraphics', 'Metal'],
                   required: get_option('pvg'))
elif host_os == 'sunos'
  socket = [cc.find_library('socket'),
            cc.find_library('nsl'),
@@ -3367,6 +3366,12 @@ foreach target : target_dirs
    target_kconfig += 'CONFIG_' + config_target['TARGET_ARCH'].to_upper() + '=y'
    target_kconfig += 'CONFIG_TARGET_BIG_ENDIAN=' + config_target['TARGET_BIG_ENDIAN']

    # PVG is not cross-architecture. Use accelerator_targets as a proxy to
    # figure out which target can support PVG on this host
    if pvg.found() and target in accelerator_targets.get('CONFIG_HVF', [])
      target_kconfig += 'CONFIG_MAC_PVG=y'
    endif

    config_input = meson.get_external_property(target, 'default')
    config_devices_mak = target + '-config-devices.mak'
    config_devices_mak = configure_file(
@@ -4840,6 +4845,9 @@ summary_info += {'libdw': libdw}
if host_os == 'freebsd'
  summary_info += {'libinotify-kqueue': inotify}
endif
if host_os == 'darwin'
  summary_info += {'ParavirtualizedGraphics support': pvg}
endif
summary(summary_info, bool_yn: true, section: 'Dependencies')

if host_arch == 'unknown'
@@ -198,6 +198,8 @@ option('lzfse', type : 'feature', value : 'auto',
       description: 'lzfse support for DMG images')
option('lzo', type : 'feature', value : 'auto',
       description: 'lzo compression support')
option('pvg', type: 'feature', value: 'auto',
       description: 'macOS paravirtualized graphics support')
option('rbd', type : 'feature', value : 'auto',
       description: 'Ceph block device driver')
option('opengl', type : 'feature', value : 'auto',
@ -42,7 +42,8 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
|
|||
" aux-ram-share=on|off allocate auxiliary guest RAM as shared (default: off)\n"
|
||||
#endif
|
||||
" memory-backend='backend-id' specifies explicitly provided backend for main RAM (default=none)\n"
|
||||
" cxl-fmw.0.targets.0=firsttarget,cxl-fmw.0.targets.1=secondtarget,cxl-fmw.0.size=size[,cxl-fmw.0.interleave-granularity=granularity]\n",
|
||||
" cxl-fmw.0.targets.0=firsttarget,cxl-fmw.0.targets.1=secondtarget,cxl-fmw.0.size=size[,cxl-fmw.0.interleave-granularity=granularity]\n"
|
||||
" smp-cache.0.cache=cachename,smp-cache.0.topology=topologylevel\n",
|
||||
QEMU_ARCH_ALL)
|
||||
SRST
|
||||
``-machine [type=]name[,prop=value[,...]]``
|
||||
|
@ -172,6 +173,33 @@ SRST
|
|||
::
|
||||
|
||||
-machine cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=128G,cxl-fmw.0.interleave-granularity=512
|
||||
|
||||
``smp-cache.0.cache=cachename,smp-cache.0.topology=topologylevel``
|
||||
Define cache properties for SMP system.
|
||||
|
||||
``cache=cachename`` specifies the cache that the properties will be
|
||||
applied on. This field is the combination of cache level and cache
|
||||
type. It supports ``l1d`` (L1 data cache), ``l1i`` (L1 instruction
|
||||
cache), ``l2`` (L2 unified cache) and ``l3`` (L3 unified cache).
|
||||
|
||||
``topology=topologylevel`` sets the cache topology level. It accepts
|
||||
CPU topology levels including ``core``, ``module``, ``cluster``, ``die``,
|
||||
``socket``, ``book``, ``drawer`` and a special value ``default``. If
|
||||
``default`` is set, then the cache topology will follow the architecture's
|
||||
default cache topology model. If another topology level is set, the cache
|
||||
will be shared at corresponding CPU topology level. For example,
|
||||
``topology=core`` makes the cache shared by all threads within a core.
|
||||
The omitting cache will default to using the ``default`` level.
|
||||
|
||||
The default cache topology model for an i386 PC machine is as follows:
|
||||
``l1d``, ``l1i``, and ``l2`` caches are per ``core``, while the ``l3``
|
||||
cache is per ``die``.
|
||||
|
||||
Example:
|
||||
|
||||
::
|
||||
|
||||
-machine smp-cache.0.cache=l1d,smp-cache.0.topology=core,smp-cache.1.cache=l1i,smp-cache.1.topology=core
|
||||
ERST
|
||||
|
||||
DEF("M", HAS_ARG, QEMU_OPTION_M,
|
||||
|
|
|
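Tying the two ``-machine`` hunks above together, a complete invocation might look like this (illustrative only, assuming an 8-vCPU q35 guest)::

    qemu-system-x86_64 -machine q35,smp-cache.0.cache=l2,smp-cache.0.topology=core,smp-cache.1.cache=l3,smp-cache.1.topology=die \
                       -smp 8,sockets=1,dies=1,cores=4,threads=2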
@@ -4,9 +4,11 @@
#include "qapi/error.h"
#include "qapi/qapi-visit-qom.h"
#include "qobject/qobject.h"
#include "qobject/qbool.h"
#include "qobject/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qobject/qjson.h"
#include "qobject/qstring.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
#include "qom/object_interfaces.h"
@@ -177,9 +179,25 @@ char *object_property_help(const char *name, const char *type,
        g_string_append(str, description);
    }
    if (defval) {
        g_autofree char *def_json = g_string_free(qobject_to_json(defval),
                                                  false);
        g_string_append_printf(str, " (default: %s)", def_json);
        g_autofree char *def_json = NULL;
        const char *def;

        switch (qobject_type(defval)) {
        case QTYPE_QSTRING:
            def = qstring_get_str(qobject_to(QString, defval));
            break;

        case QTYPE_QBOOL:
            def = qbool_get_bool(qobject_to(QBool, defval)) ? "on" : "off";
            break;

        default:
            def_json = g_string_free(qobject_to_json(defval), false);
            def = def_json;
            break;
        }

        g_string_append_printf(str, " (default: %s)", def);
    }

    return g_string_free(str, false);
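The user-visible effect of this change (illustrative, not copied from a real run) is that ``-object ...,help`` and similar property listings now print defaults in command-line syntax: a boolean default is shown as ``(default: off)`` rather than ``(default: false)``, and a string default such as ``file`` is shown unquoted rather than as JSON ``(default: "file")``.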
rust/Cargo.lock (generated, 7 changes)
@@ -54,6 +54,12 @@ dependencies = [
 "either",
]

[[package]]
name = "libc"
version = "0.2.162"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398"

[[package]]
name = "pl011"
version = "0.1.0"
@@ -100,6 +106,7 @@
name = "qemu_api"
version = "0.1.0"
dependencies = [
 "libc",
 "qemu_api_macros",
 "version_check",
]
@ -2,10 +2,10 @@
|
|||
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
use core::ptr::{addr_of, addr_of_mut, NonNull};
|
||||
use std::{
|
||||
ffi::CStr,
|
||||
os::raw::{c_int, c_void},
|
||||
ptr::{addr_of, addr_of_mut, NonNull},
|
||||
};
|
||||
|
||||
use qemu_api::{
|
||||
|
@ -19,8 +19,8 @@ use qemu_api::{
|
|||
memory::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder},
|
||||
prelude::*,
|
||||
qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl},
|
||||
qom::{ClassInitImpl, ObjectImpl, Owned, ParentField},
|
||||
sysbus::{SysBusDevice, SysBusDeviceClass},
|
||||
qom::{ObjectImpl, Owned, ParentField},
|
||||
sysbus::{SysBusDevice, SysBusDeviceImpl},
|
||||
vmstate::VMStateDescription,
|
||||
};
|
||||
|
||||
|
@ -50,11 +50,6 @@ impl std::ops::Index<hwaddr> for DeviceId {
|
|||
}
|
||||
}
|
||||
|
||||
impl DeviceId {
|
||||
const ARM: Self = Self(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
|
||||
const LUMINARY: Self = Self(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
|
||||
}
|
||||
|
||||
// FIFOs use 32-bit indices instead of usize, for compatibility with
|
||||
// the migration stream produced by the C version of this device.
|
||||
#[repr(transparent)]
|
||||
|
@ -143,16 +138,24 @@ pub struct PL011Class {
|
|||
device_id: DeviceId,
|
||||
}
|
||||
|
||||
trait PL011Impl: SysBusDeviceImpl + IsA<PL011State> {
|
||||
const DEVICE_ID: DeviceId;
|
||||
}
|
||||
|
||||
impl PL011Class {
|
||||
fn class_init<T: PL011Impl>(&mut self) {
|
||||
self.device_id = T::DEVICE_ID;
|
||||
self.parent_class.class_init::<T>();
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl ObjectType for PL011State {
|
||||
type Class = PL011Class;
|
||||
const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
|
||||
}
|
||||
|
||||
impl ClassInitImpl<PL011Class> for PL011State {
|
||||
fn class_init(klass: &mut PL011Class) {
|
||||
klass.device_id = DeviceId::ARM;
|
||||
<Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
|
||||
}
|
||||
impl PL011Impl for PL011State {
|
||||
const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
|
||||
}
|
||||
|
||||
impl ObjectImpl for PL011State {
|
||||
|
@ -160,6 +163,7 @@ impl ObjectImpl for PL011State {
|
|||
|
||||
const INSTANCE_INIT: Option<unsafe fn(&mut Self)> = Some(Self::init);
|
||||
const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
|
||||
const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
|
||||
}
|
||||
|
||||
impl DeviceImpl for PL011State {
|
||||
|
@ -176,6 +180,8 @@ impl ResettablePhasesImpl for PL011State {
|
|||
const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
|
||||
}
|
||||
|
||||
impl SysBusDeviceImpl for PL011State {}
|
||||
|
||||
impl PL011Registers {
|
||||
pub(self) fn read(&mut self, offset: RegisterOffset) -> (bool, u32) {
|
||||
use RegisterOffset::*;
|
||||
|
@ -726,13 +732,6 @@ pub struct PL011Luminary {
|
|||
parent_obj: ParentField<PL011State>,
|
||||
}
|
||||
|
||||
impl ClassInitImpl<PL011Class> for PL011Luminary {
|
||||
fn class_init(klass: &mut PL011Class) {
|
||||
klass.device_id = DeviceId::LUMINARY;
|
||||
<Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
|
||||
}
|
||||
}
|
||||
|
||||
qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object);
|
||||
|
||||
unsafe impl ObjectType for PL011Luminary {
|
||||
|
@ -742,7 +741,14 @@ unsafe impl ObjectType for PL011Luminary {
|
|||
|
||||
impl ObjectImpl for PL011Luminary {
|
||||
type ParentType = PL011State;
|
||||
|
||||
const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
|
||||
}
|
||||
|
||||
impl PL011Impl for PL011Luminary {
|
||||
const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
|
||||
}
|
||||
|
||||
impl DeviceImpl for PL011Luminary {}
|
||||
impl ResettablePhasesImpl for PL011Luminary {}
|
||||
impl SysBusDeviceImpl for PL011Luminary {}
|
||||
|
|
|
@ -2,8 +2,10 @@
|
|||
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
use core::ptr::NonNull;
|
||||
use std::os::raw::{c_int, c_void};
|
||||
use std::{
|
||||
os::raw::{c_int, c_void},
|
||||
ptr::NonNull,
|
||||
};
|
||||
|
||||
use qemu_api::{
|
||||
bindings::*, c_str, prelude::*, vmstate_clock, vmstate_fields, vmstate_of, vmstate_struct,
|
||||
|
|
|
@@ -1,2 +1,3 @@
config X_HPET_RUST
    bool
    default y if PC && HAVE_RUST
@ -23,7 +23,7 @@ use qemu_api::{
|
|||
qdev::{DeviceImpl, DeviceMethods, DeviceState, Property, ResetType, ResettablePhasesImpl},
|
||||
qom::{ObjectImpl, ObjectType, ParentField},
|
||||
qom_isa,
|
||||
sysbus::SysBusDevice,
|
||||
sysbus::{SysBusDevice, SysBusDeviceImpl},
|
||||
timer::{Timer, CLOCK_VIRTUAL},
|
||||
};
|
||||
|
||||
|
@ -836,6 +836,7 @@ impl ObjectImpl for HPETState {
|
|||
|
||||
const INSTANCE_INIT: Option<unsafe fn(&mut Self)> = Some(Self::init);
|
||||
const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
|
||||
const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
|
||||
}
|
||||
|
||||
// TODO: Make these properties user-configurable!
|
||||
|
@ -887,3 +888,5 @@ impl DeviceImpl for HPETState {
|
|||
impl ResettablePhasesImpl for HPETState {
|
||||
const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
|
||||
}
|
||||
|
||||
impl SysBusDeviceImpl for HPETState {}
|
||||
|
|
|
@ -16,6 +16,7 @@ rust-version = "1.63.0"
|
|||
|
||||
[dependencies]
|
||||
qemu_api_macros = { path = "../qemu-api-macros" }
|
||||
libc = "0.2.162"
|
||||
|
||||
[build-dependencies]
|
||||
version_check = "~0.9"
|
||||
|
|
|
@@ -2,6 +2,8 @@ _qemu_api_cfg = run_command(rustc_args,
  '--config-headers', config_host_h, '--features', files('Cargo.toml'),
  capture: true, check: true).stdout().strip().splitlines()

libc_dep = dependency('libc-0.2-rs')

# _qemu_api_cfg += ['--cfg', 'feature="allocator"']
if rustc.version().version_compare('>=1.77.0')
  _qemu_api_cfg += ['--cfg', 'has_offset_of']
@@ -22,6 +24,7 @@ _qemu_api_rs = static_library(
    'src/cell.rs',
    'src/chardev.rs',
    'src/c_str.rs',
    'src/errno.rs',
    'src/irq.rs',
    'src/memory.rs',
    'src/module.rs',
@@ -39,6 +42,7 @@ _qemu_api_rs = static_library(
  override_options: ['rust_std=2021', 'build.rust_std=2021'],
  rust_abi: 'rust',
  rust_args: _qemu_api_cfg,
  dependencies: libc_dep,
)

rust.test('rust-qemu-api-tests', _qemu_api_rs,
@ -92,3 +92,31 @@ macro_rules! assert_field_type {
|
|||
};
|
||||
};
|
||||
}
|
||||
|
||||
/// Assert that an expression matches a pattern. This can also be
|
||||
/// useful to compare enums that do not implement `Eq`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use qemu_api::assert_match;
|
||||
/// // JoinHandle does not implement `Eq`, therefore the result
|
||||
/// // does not either.
|
||||
/// let result: Result<std::thread::JoinHandle<()>, u32> = Err(42);
|
||||
/// assert_match!(result, Err(42));
|
||||
/// ```
|
||||
#[macro_export]
|
||||
macro_rules! assert_match {
|
||||
($a:expr, $b:pat) => {
|
||||
assert!(
|
||||
match $a {
|
||||
$b => true,
|
||||
_ => false,
|
||||
},
|
||||
"{} = {:?} does not match {}",
|
||||
stringify!($a),
|
||||
$a,
|
||||
stringify!($b)
|
||||
);
|
||||
};
|
||||
}
|
||||
|
|
rust/qemu-api/src/errno.rs (new file, 345 lines)
|
@ -0,0 +1,345 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
//! Utility functions to convert `errno` to and from
|
||||
//! [`io::Error`]/[`io::Result`]
|
||||
//!
|
||||
//! QEMU C functions often have a "positive success/negative `errno`" calling
|
||||
//! convention. This module provides functions to portably convert an integer
|
||||
//! into an [`io::Result`] and back.
|
||||
|
||||
use std::{convert::TryFrom, io, io::ErrorKind};
|
||||
|
||||
/// An `errno` value that can be converted into an [`io::Error`]
|
||||
pub struct Errno(pub u16);
|
||||
|
||||
// On Unix, from_raw_os_error takes an errno value and OS errors
|
||||
// are printed using strerror. On Windows however it takes a
|
||||
// GetLastError() value; therefore we need to convert errno values
|
||||
// into io::Error by hand. This is the same mapping that the
|
||||
// standard library uses to retrieve the kind of OS errors
|
||||
// (`std::sys::pal::unix::decode_error_kind`).
|
||||
impl From<Errno> for ErrorKind {
|
||||
fn from(value: Errno) -> ErrorKind {
|
||||
use ErrorKind::*;
|
||||
let Errno(errno) = value;
|
||||
match i32::from(errno) {
|
||||
libc::EPERM | libc::EACCES => PermissionDenied,
|
||||
libc::ENOENT => NotFound,
|
||||
libc::EINTR => Interrupted,
|
||||
x if x == libc::EAGAIN || x == libc::EWOULDBLOCK => WouldBlock,
|
||||
libc::ENOMEM => OutOfMemory,
|
||||
libc::EEXIST => AlreadyExists,
|
||||
libc::EINVAL => InvalidInput,
|
||||
libc::EPIPE => BrokenPipe,
|
||||
libc::EADDRINUSE => AddrInUse,
|
||||
libc::EADDRNOTAVAIL => AddrNotAvailable,
|
||||
libc::ECONNABORTED => ConnectionAborted,
|
||||
libc::ECONNREFUSED => ConnectionRefused,
|
||||
libc::ECONNRESET => ConnectionReset,
|
||||
libc::ENOTCONN => NotConnected,
|
||||
libc::ENOTSUP => Unsupported,
|
||||
libc::ETIMEDOUT => TimedOut,
|
||||
_ => Other,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This is used on Windows for all io::Errors, but also on Unix if the
|
||||
// io::Error does not have a raw OS error. This is the reversed
|
||||
// mapping of the above; EIO is returned for unknown ErrorKinds.
|
||||
impl From<io::ErrorKind> for Errno {
|
||||
fn from(value: io::ErrorKind) -> Errno {
|
||||
use ErrorKind::*;
|
||||
let errno = match value {
|
||||
// can be both EPERM or EACCES :( pick one
|
||||
PermissionDenied => libc::EPERM,
|
||||
NotFound => libc::ENOENT,
|
||||
Interrupted => libc::EINTR,
|
||||
WouldBlock => libc::EAGAIN,
|
||||
OutOfMemory => libc::ENOMEM,
|
||||
AlreadyExists => libc::EEXIST,
|
||||
InvalidInput => libc::EINVAL,
|
||||
BrokenPipe => libc::EPIPE,
|
||||
AddrInUse => libc::EADDRINUSE,
|
||||
AddrNotAvailable => libc::EADDRNOTAVAIL,
|
||||
ConnectionAborted => libc::ECONNABORTED,
|
||||
ConnectionRefused => libc::ECONNREFUSED,
|
||||
ConnectionReset => libc::ECONNRESET,
|
||||
NotConnected => libc::ENOTCONN,
|
||||
Unsupported => libc::ENOTSUP,
|
||||
TimedOut => libc::ETIMEDOUT,
|
||||
_ => libc::EIO,
|
||||
};
|
||||
Errno(errno as u16)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Errno> for io::Error {
|
||||
#[cfg(unix)]
|
||||
fn from(value: Errno) -> io::Error {
|
||||
let Errno(errno) = value;
|
||||
io::Error::from_raw_os_error(errno.into())
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn from(value: Errno) -> io::Error {
|
||||
let error_kind: ErrorKind = value.into();
|
||||
error_kind.into()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for Errno {
|
||||
fn from(value: io::Error) -> Errno {
|
||||
if cfg!(unix) {
|
||||
if let Some(errno) = value.raw_os_error() {
|
||||
return Errno(u16::try_from(errno).unwrap());
|
||||
}
|
||||
}
|
||||
value.kind().into()
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal traits; used to enable [`into_io_result`] and [`into_neg_errno`]
|
||||
/// for the "right" set of types.
|
||||
mod traits {
|
||||
use super::Errno;
|
||||
|
||||
/// A signed type that can be converted into an
|
||||
/// [`io::Result`](std::io::Result)
|
||||
pub trait GetErrno {
|
||||
/// Unsigned variant of `Self`, used as the type for the `Ok` case.
|
||||
type Out;
|
||||
|
||||
/// Return `Ok(self)` if positive, `Err(Errno(-self))` if negative
|
||||
fn into_errno_result(self) -> Result<Self::Out, Errno>;
|
||||
}
|
||||
|
||||
/// A type that can be taken out of an [`io::Result`](std::io::Result) and
|
||||
/// converted into "positive success/negative `errno`" convention.
|
||||
pub trait MergeErrno {
|
||||
/// Signed variant of `Self`, used as the return type of
|
||||
/// [`into_neg_errno`](super::into_neg_errno).
|
||||
type Out: From<u16> + std::ops::Neg<Output = Self::Out>;
|
||||
|
||||
/// Return `self`, asserting that it is in range
|
||||
fn map_ok(self) -> Self::Out;
|
||||
}
|
||||
|
||||
macro_rules! get_errno {
|
||||
($t:ty, $out:ty) => {
|
||||
impl GetErrno for $t {
|
||||
type Out = $out;
|
||||
fn into_errno_result(self) -> Result<Self::Out, Errno> {
|
||||
match self {
|
||||
0.. => Ok(self as $out),
|
||||
-65535..=-1 => Err(Errno(-self as u16)),
|
||||
_ => panic!("{self} is not a negative errno"),
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
get_errno!(i32, u32);
|
||||
get_errno!(i64, u64);
|
||||
get_errno!(isize, usize);
|
||||
|
||||
macro_rules! merge_errno {
|
||||
($t:ty, $out:ty) => {
|
||||
impl MergeErrno for $t {
|
||||
type Out = $out;
|
||||
fn map_ok(self) -> Self::Out {
|
||||
self.try_into().unwrap()
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
merge_errno!(u8, i32);
|
||||
merge_errno!(u16, i32);
|
||||
merge_errno!(u32, i32);
|
||||
merge_errno!(u64, i64);
|
||||
|
||||
impl MergeErrno for () {
|
||||
type Out = i32;
|
||||
fn map_ok(self) -> i32 {
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
use traits::{GetErrno, MergeErrno};
|
||||
|
||||
/// Convert an integer value into a [`io::Result`].
|
||||
///
|
||||
/// Positive values are turned into an `Ok` result; negative values
|
||||
/// are interpreted as negated `errno` and turned into an `Err`.
|
||||
///
|
||||
/// ```
|
||||
/// # use qemu_api::errno::into_io_result;
|
||||
/// # use std::io::ErrorKind;
|
||||
/// let ok = into_io_result(1i32).unwrap();
|
||||
/// assert_eq!(ok, 1u32);
|
||||
///
|
||||
/// let err = into_io_result(-1i32).unwrap_err(); // -EPERM
|
||||
/// assert_eq!(err.kind(), ErrorKind::PermissionDenied);
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Since the result is an unsigned integer, negative values must
|
||||
/// be close to 0; values that are too far away are considered
|
||||
/// likely overflows and will panic:
|
||||
///
|
||||
/// ```should_panic
|
||||
/// # use qemu_api::errno::into_io_result;
|
||||
/// # #[allow(dead_code)]
|
||||
/// let err = into_io_result(-0x1234_5678i32); // panic
|
||||
/// ```
|
||||
pub fn into_io_result<T: GetErrno>(value: T) -> io::Result<T::Out> {
|
||||
value.into_errno_result().map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Convert a [`Result`] into an integer value, using negative `errno`
|
||||
/// values to report errors.
|
||||
///
|
||||
/// ```
|
||||
/// # use qemu_api::errno::into_neg_errno;
|
||||
/// # use std::io::{self, ErrorKind};
|
||||
/// let ok: io::Result<()> = Ok(());
|
||||
/// assert_eq!(into_neg_errno(ok), 0);
|
||||
///
|
||||
/// let err: io::Result<()> = Err(ErrorKind::InvalidInput.into());
|
||||
/// assert_eq!(into_neg_errno(err), -22); // -EINVAL
|
||||
/// ```
|
||||
///
|
||||
/// Since this module also provides the ability to convert [`io::Error`]
|
||||
/// to an `errno` value, [`io::Result`] is the most commonly used type
|
||||
/// for the argument of this function:
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Since the result is a signed integer, integer `Ok` values must remain
|
||||
/// positive:
|
||||
///
|
||||
/// ```should_panic
|
||||
/// # use qemu_api::errno::into_neg_errno;
|
||||
/// # use std::io;
|
||||
/// let err: io::Result<u32> = Ok(0x8899_AABB);
|
||||
/// into_neg_errno(err) // panic
|
||||
/// # ;
|
||||
/// ```
|
||||
pub fn into_neg_errno<T: MergeErrno, E: Into<Errno>>(value: Result<T, E>) -> T::Out {
|
||||
match value {
|
||||
Ok(x) => x.map_ok(),
|
||||
Err(err) => -T::Out::from(err.into().0),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::ErrorKind;
|
||||
|
||||
use super::*;
|
||||
use crate::assert_match;
|
||||
|
||||
#[test]
|
||||
pub fn test_from_u8() {
|
||||
let ok: io::Result<_> = Ok(42u8);
|
||||
assert_eq!(into_neg_errno(ok), 42);
|
||||
|
||||
let err: io::Result<u8> = Err(io::ErrorKind::PermissionDenied.into());
|
||||
assert_eq!(into_neg_errno(err), -1);
|
||||
|
||||
if cfg!(unix) {
|
||||
let os_err: io::Result<u8> = Err(io::Error::from_raw_os_error(10));
|
||||
assert_eq!(into_neg_errno(os_err), -10);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_from_u16() {
|
||||
let ok: io::Result<_> = Ok(1234u16);
|
||||
assert_eq!(into_neg_errno(ok), 1234);
|
||||
|
||||
let err: io::Result<u16> = Err(io::ErrorKind::PermissionDenied.into());
|
||||
assert_eq!(into_neg_errno(err), -1);
|
||||
|
||||
if cfg!(unix) {
|
||||
let os_err: io::Result<u16> = Err(io::Error::from_raw_os_error(10));
|
||||
assert_eq!(into_neg_errno(os_err), -10);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_i32() {
|
||||
assert_match!(into_io_result(1234i32), Ok(1234));
|
||||
|
||||
let err = into_io_result(-1i32).unwrap_err();
|
||||
#[cfg(unix)]
|
||||
assert_match!(err.raw_os_error(), Some(1));
|
||||
assert_match!(err.kind(), ErrorKind::PermissionDenied);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_from_u32() {
|
||||
let ok: io::Result<_> = Ok(1234u32);
|
||||
assert_eq!(into_neg_errno(ok), 1234);
|
||||
|
||||
let err: io::Result<u32> = Err(io::ErrorKind::PermissionDenied.into());
|
||||
assert_eq!(into_neg_errno(err), -1);
|
||||
|
||||
if cfg!(unix) {
|
||||
let os_err: io::Result<u32> = Err(io::Error::from_raw_os_error(10));
|
||||
assert_eq!(into_neg_errno(os_err), -10);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_i64() {
|
||||
assert_match!(into_io_result(1234i64), Ok(1234));
|
||||
|
||||
let err = into_io_result(-22i64).unwrap_err();
|
||||
#[cfg(unix)]
|
||||
assert_match!(err.raw_os_error(), Some(22));
|
||||
assert_match!(err.kind(), ErrorKind::InvalidInput);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_from_u64() {
|
||||
let ok: io::Result<_> = Ok(1234u64);
|
||||
assert_eq!(into_neg_errno(ok), 1234);
|
||||
|
||||
let err: io::Result<u64> = Err(io::ErrorKind::InvalidInput.into());
|
||||
assert_eq!(into_neg_errno(err), -22);
|
||||
|
||||
if cfg!(unix) {
|
||||
let os_err: io::Result<u64> = Err(io::Error::from_raw_os_error(6));
|
||||
assert_eq!(into_neg_errno(os_err), -6);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_isize() {
|
||||
assert_match!(into_io_result(1234isize), Ok(1234));
|
||||
|
||||
let err = into_io_result(-4isize).unwrap_err();
|
||||
#[cfg(unix)]
|
||||
assert_match!(err.raw_os_error(), Some(4));
|
||||
assert_match!(err.kind(), ErrorKind::Interrupted);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_from_unit() {
|
||||
let ok: io::Result<_> = Ok(());
|
||||
assert_eq!(into_neg_errno(ok), 0);
|
||||
|
||||
let err: io::Result<()> = Err(io::ErrorKind::OutOfMemory.into());
|
||||
assert_eq!(into_neg_errno(err), -12);
|
||||
|
||||
if cfg!(unix) {
|
||||
let os_err: io::Result<()> = Err(io::Error::from_raw_os_error(2));
|
||||
assert_eq!(into_neg_errno(os_err), -2);
|
||||
}
|
||||
}
|
||||
}
|
|
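As a self-contained usage sketch (not part of the patch), built only on the helpers added above::

    use std::io;

    use qemu_api::errno::{into_io_result, into_neg_errno};

    // Turn an io::Result into the C-style "positive success/negative errno"
    // return value, and decode such a value back again.
    fn result_to_c(res: io::Result<u32>) -> i32 {
        into_neg_errno(res)
    }

    fn main() {
        assert_eq!(result_to_c(Ok(16)), 16);

        let inval: io::Result<u32> = Err(io::ErrorKind::InvalidInput.into());
        assert_eq!(result_to_c(inval), -22); // -EINVAL

        // Going the other way, a negative return value becomes an io::Error.
        assert!(into_io_result(-1i32).is_err()); // EPERM
    }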
@@ -4,8 +4,7 @@

//! Bindings for interrupt sources

use core::ptr;
use std::{ffi::CStr, marker::PhantomData, os::raw::c_int};
use std::{ffi::CStr, marker::PhantomData, os::raw::c_int, ptr};

use crate::{
    bindings::{self, qemu_set_irq},
@@ -19,6 +19,7 @@ pub mod c_str;
pub mod callbacks;
pub mod cell;
pub mod chardev;
pub mod errno;
pub mod irq;
pub mod memory;
pub mod module;
@@ -9,6 +9,8 @@ pub use crate::bitops::IntegerExt;
pub use crate::cell::BqlCell;
pub use crate::cell::BqlRefCell;

pub use crate::errno;

pub use crate::qdev::DeviceMethods;

pub use crate::qom::InterfaceType;
@ -19,7 +19,7 @@ use crate::{
|
|||
chardev::Chardev,
|
||||
irq::InterruptSource,
|
||||
prelude::*,
|
||||
qom::{ClassInitImpl, ObjectClass, ObjectImpl, Owned},
|
||||
qom::{ObjectClass, ObjectImpl, Owned},
|
||||
vmstate::VMStateDescription,
|
||||
};
|
||||
|
||||
|
@ -86,7 +86,7 @@ unsafe extern "C" fn rust_resettable_exit_fn<T: ResettablePhasesImpl>(
|
|||
}
|
||||
|
||||
/// Trait providing the contents of [`DeviceClass`].
|
||||
pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl {
|
||||
pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl + IsA<DeviceState> {
|
||||
/// _Realization_ is the second stage of device creation. It contains
|
||||
/// all operations that depend on device properties and can fail (note:
|
||||
/// this is not yet supported for Rust devices).
|
||||
|
@ -113,7 +113,7 @@ pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl {
|
|||
/// # Safety
|
||||
///
|
||||
/// This function is only called through the QOM machinery and
|
||||
/// used by the `ClassInitImpl<DeviceClass>` trait.
|
||||
/// used by `DeviceClass::class_init`.
|
||||
/// We expect the FFI user of this function to pass a valid pointer that
|
||||
/// can be downcasted to type `T`. We also expect the device is
|
||||
/// readable/writeable from one thread at any time.
|
||||
|
@ -127,43 +127,41 @@ unsafe impl InterfaceType for ResettableClass {
|
|||
unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_RESETTABLE_INTERFACE) };
|
||||
}
|
||||
|
||||
impl<T> ClassInitImpl<ResettableClass> for T
|
||||
where
|
||||
T: ResettablePhasesImpl,
|
||||
{
|
||||
fn class_init(rc: &mut ResettableClass) {
|
||||
impl ResettableClass {
|
||||
/// Fill in the virtual methods of `ResettableClass` based on the
|
||||
/// definitions in the `ResettablePhasesImpl` trait.
|
||||
pub fn class_init<T: ResettablePhasesImpl>(&mut self) {
|
||||
if <T as ResettablePhasesImpl>::ENTER.is_some() {
|
||||
rc.phases.enter = Some(rust_resettable_enter_fn::<T>);
|
||||
self.phases.enter = Some(rust_resettable_enter_fn::<T>);
|
||||
}
|
||||
if <T as ResettablePhasesImpl>::HOLD.is_some() {
|
||||
rc.phases.hold = Some(rust_resettable_hold_fn::<T>);
|
||||
self.phases.hold = Some(rust_resettable_hold_fn::<T>);
|
||||
}
|
||||
if <T as ResettablePhasesImpl>::EXIT.is_some() {
|
||||
rc.phases.exit = Some(rust_resettable_exit_fn::<T>);
|
||||
self.phases.exit = Some(rust_resettable_exit_fn::<T>);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ClassInitImpl<DeviceClass> for T
|
||||
where
|
||||
T: ClassInitImpl<ObjectClass> + ClassInitImpl<ResettableClass> + DeviceImpl,
|
||||
{
|
||||
fn class_init(dc: &mut DeviceClass) {
|
||||
impl DeviceClass {
|
||||
/// Fill in the virtual methods of `DeviceClass` based on the definitions in
|
||||
/// the `DeviceImpl` trait.
|
||||
pub fn class_init<T: DeviceImpl>(&mut self) {
|
||||
if <T as DeviceImpl>::REALIZE.is_some() {
|
||||
dc.realize = Some(rust_realize_fn::<T>);
|
||||
self.realize = Some(rust_realize_fn::<T>);
|
||||
}
|
||||
if let Some(vmsd) = <T as DeviceImpl>::vmsd() {
|
||||
dc.vmsd = vmsd;
|
||||
self.vmsd = vmsd;
|
||||
}
|
||||
let prop = <T as DeviceImpl>::properties();
|
||||
if !prop.is_empty() {
|
||||
unsafe {
|
||||
bindings::device_class_set_props_n(dc, prop.as_ptr(), prop.len());
|
||||
bindings::device_class_set_props_n(self, prop.as_ptr(), prop.len());
|
||||
}
|
||||
}
|
||||
|
||||
ResettableClass::interface_init::<T, DeviceState>(dc);
|
||||
<T as ClassInitImpl<ObjectClass>>::class_init(&mut dc.parent_class);
|
||||
ResettableClass::cast::<DeviceState>(self).class_init::<T>();
|
||||
self.parent_class.class_init::<T>();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -37,11 +37,8 @@
|
|||
//! * a trait for virtual method implementations, for example `DeviceImpl`.
|
||||
//! Child classes implement this trait to provide their own behavior for
|
||||
//! virtual methods. The trait's methods take `&self` to access instance data.
|
||||
//!
|
||||
//! * an implementation of [`ClassInitImpl`], for example
|
||||
//! `ClassInitImpl<DeviceClass>`. This fills the vtable in the class struct;
|
||||
//! the source for this is the `*Impl` trait; the associated consts and
|
||||
//! functions if needed are wrapped to map C types into Rust types.
|
||||
//! The traits have the appropriate specialization of `IsA<>` as a supertrait,
|
||||
//! for example `IsA<DeviceState>` for `DeviceImpl`.
|
||||
//!
|
||||
//! * a trait for instance methods, for example `DeviceMethods`. This trait is
|
||||
//! automatically implemented for any reference or smart pointer to a device
|
||||
|
@ -52,6 +49,48 @@
|
|||
//! This provides access to class-wide functionality that doesn't depend on
|
||||
//! instance data. Like instance methods, these are automatically inherited by
|
||||
//! child classes.
|
||||
//!
|
||||
//! # Class structures
|
||||
//!
|
||||
//! Each QOM class that has virtual methods describes them in a
|
||||
//! _class struct_. Class structs include a parent field corresponding
|
||||
//! to the vtable of the parent class, all the way up to [`ObjectClass`].
|
||||
//!
|
||||
//! As mentioned above, virtual methods are defined via traits such as
|
||||
//! `DeviceImpl`. Class structs do not define any trait but, conventionally,
|
||||
//! all of them have a `class_init` method to initialize the virtual methods
|
||||
//! based on the trait and then call the same method on the superclass.
|
||||
//!
|
||||
//! ```ignore
|
||||
//! impl YourSubclassClass
|
||||
//! {
|
||||
//! pub fn class_init<T: YourSubclassImpl>(&mut self) {
|
||||
//! ...
|
||||
//! klass.parent_class::class_init<T>();
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! If a class implements a QOM interface, its `class_init` function must
//! also contain, for each interface, an extra forwarding call as follows:
|
||||
//!
|
||||
//! ```ignore
|
||||
//! ResettableClass::cast::<Self>(self).class_init::<Self>();
|
||||
//! ```
|
||||
//!
|
||||
//! These `class_init` functions are methods on the class rather than a trait,
|
||||
//! because the bound on `T` (`DeviceImpl` in this case), will change for every
|
||||
//! class struct. The functions are pointed to by the
|
||||
//! [`ObjectImpl::CLASS_INIT`] function pointer. While there is no default
|
||||
//! implementation, in most cases it will be enough to write it as follows:
|
||||
//!
|
||||
//! ```ignore
|
||||
//! const CLASS_INIT: fn(&mut Self::Class)> = Self::Class::class_init::<Self>;
|
||||
//! ```
|
||||
//!
|
||||
//! This design incurs a small amount of code duplication but, by not using
|
||||
//! traits, it allows the flexibility of implementing bindings in any crate,
|
||||
//! without incurring into violations of orphan rules for traits.
|
||||
|
||||
use std::{
|
||||
ffi::CStr,
|
||||
|
@ -178,7 +217,7 @@ unsafe extern "C" fn rust_instance_post_init<T: ObjectImpl>(obj: *mut Object) {
|
|||
T::INSTANCE_POST_INIT.unwrap()(unsafe { state.as_ref() });
|
||||
}
|
||||
|
||||
unsafe extern "C" fn rust_class_init<T: ObjectType + ClassInitImpl<T::Class>>(
|
||||
unsafe extern "C" fn rust_class_init<T: ObjectType + ObjectImpl>(
|
||||
klass: *mut ObjectClass,
|
||||
_data: *mut c_void,
|
||||
) {
|
||||
|
@ -188,7 +227,7 @@ unsafe extern "C" fn rust_class_init<T: ObjectType + ClassInitImpl<T::Class>>(
|
|||
// SAFETY: klass is a T::Class, since rust_class_init<T>
|
||||
// is called from QOM core as the class_init function
|
||||
// for class T
|
||||
T::class_init(unsafe { klass.as_mut() })
|
||||
<T as ObjectImpl>::CLASS_INIT(unsafe { klass.as_mut() })
|
||||
}
|
||||
|
||||
unsafe extern "C" fn drop_object<T: ObjectImpl>(obj: *mut Object) {
|
||||
|
@ -277,19 +316,25 @@ pub unsafe trait InterfaceType: Sized {
|
|||
/// for this interface.
|
||||
const TYPE_NAME: &'static CStr;
|
||||
|
||||
/// Initialize the vtable for the interface; the generic argument `T` is the
|
||||
/// type being initialized, while the generic argument `U` is the type that
|
||||
/// Return the vtable for the interface; `U` is the type that
|
||||
/// lists the interface in its `TypeInfo`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// This function is usually called by a `class_init` method in `U::Class`.
|
||||
/// For example, `DeviceClass::class_init<T>` initializes its `Resettable`
|
||||
/// interface as follows:
|
||||
///
|
||||
/// ```ignore
|
||||
/// ResettableClass::cast::<DeviceState>(self).class_init::<T>();
|
||||
/// ```
|
||||
///
|
||||
/// where `T` is the concrete subclass that is being initialized.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panic if the incoming argument if `T` does not implement the interface.
|
||||
fn interface_init<
|
||||
T: ObjectType + ClassInitImpl<Self> + ClassInitImpl<U::Class>,
|
||||
U: ObjectType,
|
||||
>(
|
||||
klass: &mut U::Class,
|
||||
) {
|
||||
fn cast<U: ObjectType>(klass: &mut U::Class) -> &mut Self {
|
||||
unsafe {
|
||||
// SAFETY: upcasting to ObjectClass is always valid, and the
|
||||
// return type is either NULL or the argument itself
|
||||
|
@ -298,8 +343,7 @@ pub unsafe trait InterfaceType: Sized {
|
|||
Self::TYPE_NAME.as_ptr(),
|
||||
)
|
||||
.cast();
|
||||
|
||||
<T as ClassInitImpl<Self>>::class_init(result.as_mut().unwrap())
|
||||
result.as_mut().unwrap()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -497,7 +541,7 @@ impl<T: ObjectType> ObjectDeref for &mut T {}
|
|||
impl<T: ObjectType> ObjectCastMut for &mut T {}
|
||||
|
||||
/// Trait a type must implement to be registered with QEMU.
|
||||
pub trait ObjectImpl: ObjectType + ClassInitImpl<Self::Class> {
|
||||
pub trait ObjectImpl: ObjectType + IsA<Object> {
|
||||
/// The parent of the type. This should match the first field of the
|
||||
/// struct that implements `ObjectImpl`, minus the `ParentField<_>` wrapper.
|
||||
type ParentType: ObjectType;
|
||||
|
@ -550,85 +594,26 @@ pub trait ObjectImpl: ObjectType + ClassInitImpl<Self::Class> {
|
|||
|
||||
// methods on ObjectClass
|
||||
const UNPARENT: Option<fn(&Self)> = None;
|
||||
}
|
||||
|
||||
/// Internal trait used to automatically fill in a class struct.
|
||||
///
|
||||
/// Each QOM class that has virtual methods describes them in a
|
||||
/// _class struct_. Class structs include a parent field corresponding
|
||||
/// to the vtable of the parent class, all the way up to [`ObjectClass`].
|
||||
/// Each QOM type has one such class struct; this trait takes care of
|
||||
/// initializing the `T` part of the class struct, for the type that
|
||||
/// implements the trait.
|
||||
///
|
||||
/// Each struct will implement this trait with `T` equal to each
|
||||
/// superclass. For example, a device should implement at least
|
||||
/// `ClassInitImpl<`[`DeviceClass`](crate::qdev::DeviceClass)`>` and
|
||||
/// `ClassInitImpl<`[`ObjectClass`]`>`. Such implementations are made
|
||||
/// in one of two ways.
|
||||
///
|
||||
/// For most superclasses, `ClassInitImpl` is provided by the `qemu-api`
|
||||
/// crate itself. The Rust implementation of methods will come from a
|
||||
/// trait like [`ObjectImpl`] or [`DeviceImpl`](crate::qdev::DeviceImpl),
|
||||
/// and `ClassInitImpl` is provided by blanket implementations that
|
||||
/// operate on all implementors of the `*Impl`* trait. For example:
|
||||
///
|
||||
/// ```ignore
|
||||
/// impl<T> ClassInitImpl<DeviceClass> for T
|
||||
/// where
|
||||
/// T: ClassInitImpl<ObjectClass> + DeviceImpl,
|
||||
/// ```
|
||||
///
|
||||
/// The bound on `ClassInitImpl<ObjectClass>` is needed so that,
|
||||
/// after initializing the `DeviceClass` part of the class struct,
|
||||
/// the parent [`ObjectClass`] is initialized as well.
|
||||
///
|
||||
/// The other case is when manual implementation of the trait is needed.
|
||||
/// This covers the following cases:
|
||||
///
|
||||
/// * if a class implements a QOM interface, the Rust code _has_ to define its
|
||||
/// own class struct `FooClass` and implement `ClassInitImpl<FooClass>`.
|
||||
/// `ClassInitImpl<FooClass>`'s `class_init` method will then forward to
|
||||
/// multiple other `class_init`s, for the interfaces as well as the
|
||||
/// superclass. (Note that there is no Rust example yet for using interfaces).
|
||||
///
|
||||
/// * for classes implemented outside the ``qemu-api`` crate, it's not possible
|
||||
/// to add blanket implementations like the above one, due to orphan rules. In
|
||||
/// that case, the easiest solution is to implement
|
||||
/// `ClassInitImpl<YourSuperclass>` for each subclass and not have a
|
||||
/// `YourSuperclassImpl` trait at all.
|
||||
///
|
||||
/// ```ignore
|
||||
/// impl ClassInitImpl<YourSuperclass> for YourSubclass {
|
||||
/// fn class_init(klass: &mut YourSuperclass) {
|
||||
/// klass.some_method = Some(Self::some_method);
|
||||
/// <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// While this method incurs a small amount of code duplication,
|
||||
/// it is generally limited to the recursive call on the last line.
|
||||
/// This is because classes defined in Rust do not need the same
|
||||
/// glue code that is needed when the classes are defined in C code.
|
||||
/// You may consider using a macro if you have many subclasses.
|
||||
pub trait ClassInitImpl<T> {
|
||||
/// Initialize `klass` to point to the virtual method implementations
|
||||
/// Store into the argument the virtual method implementations
|
||||
/// for `Self`. On entry, the virtual method pointers are set to
|
||||
/// the default values coming from the parent classes; the function
|
||||
/// can change them to override virtual methods of a parent class.
|
||||
///
|
||||
/// The virtual method implementations usually come from another
|
||||
/// trait, for example [`DeviceImpl`](crate::qdev::DeviceImpl)
|
||||
/// when `T` is [`DeviceClass`](crate::qdev::DeviceClass).
|
||||
/// Usually defined simply as `Self::Class::class_init::<Self>`;
|
||||
/// however a default implementation cannot be included here, because the
|
||||
/// bounds that the `Self::Class::class_init` method places on `Self` are
|
||||
/// not known in advance.
|
||||
///
|
||||
/// On entry, `klass`'s parent class is initialized, while the other fields
|
||||
/// # Safety
|
||||
///
|
||||
/// While `klass`'s parent class is initialized on entry, the other fields
|
||||
/// are all zero; it is therefore assumed that all fields in `T` can be
|
||||
/// zeroed, otherwise it would not be possible to provide the class as a
|
||||
/// `&mut T`. TODO: add a bound of [`Zeroable`](crate::zeroable::Zeroable)
|
||||
/// to T; this is more easily done once Zeroable does not require a manual
|
||||
/// implementation (Rust 1.75.0).
|
||||
fn class_init(klass: &mut T);
|
||||
const CLASS_INIT: fn(&mut Self::Class);
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
|
@ -641,13 +626,12 @@ unsafe extern "C" fn rust_unparent_fn<T: ObjectImpl>(dev: *mut Object) {
|
|||
T::UNPARENT.unwrap()(unsafe { state.as_ref() });
|
||||
}
|
||||
|
||||
impl<T> ClassInitImpl<ObjectClass> for T
|
||||
where
|
||||
T: ObjectImpl,
|
||||
{
|
||||
fn class_init(oc: &mut ObjectClass) {
|
||||
impl ObjectClass {
|
||||
/// Fill in the virtual methods of `ObjectClass` based on the definitions in
|
||||
/// the `ObjectImpl` trait.
|
||||
pub fn class_init<T: ObjectImpl>(&mut self) {
|
||||
if <T as ObjectImpl>::UNPARENT.is_some() {
|
||||
oc.unparent = Some(rust_unparent_fn::<T>);
|
||||
self.unparent = Some(rust_unparent_fn::<T>);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,8 +14,8 @@ use crate::{
|
|||
irq::{IRQState, InterruptSource},
|
||||
memory::MemoryRegion,
|
||||
prelude::*,
|
||||
qdev::{DeviceClass, DeviceState},
|
||||
qom::{ClassInitImpl, Owned},
|
||||
qdev::{DeviceImpl, DeviceState},
|
||||
qom::Owned,
|
||||
};
|
||||
|
||||
unsafe impl ObjectType for SysBusDevice {
|
||||
|
@ -25,13 +25,14 @@ unsafe impl ObjectType for SysBusDevice {
|
|||
}
|
||||
qom_isa!(SysBusDevice: DeviceState, Object);
|
||||
|
||||
// TODO: add SysBusDeviceImpl
|
||||
impl<T> ClassInitImpl<SysBusDeviceClass> for T
|
||||
where
|
||||
T: ClassInitImpl<DeviceClass>,
|
||||
{
|
||||
fn class_init(sdc: &mut SysBusDeviceClass) {
|
||||
<T as ClassInitImpl<DeviceClass>>::class_init(&mut sdc.parent_class);
|
||||
// TODO: add virtual methods
|
||||
pub trait SysBusDeviceImpl: DeviceImpl + IsA<SysBusDevice> {}
|
||||
|
||||
impl SysBusDeviceClass {
|
||||
/// Fill in the virtual methods of `SysBusDeviceClass` based on the
|
||||
/// definitions in the `SysBusDeviceImpl` trait.
|
||||
pub fn class_init<T: SysBusDeviceImpl>(self: &mut SysBusDeviceClass) {
|
||||
self.parent_class.class_init::<T>();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -8,13 +8,14 @@ use std::{
|
|||
};
|
||||
|
||||
use qemu_api::{
|
||||
bindings::*,
|
||||
bindings::{module_call_init, module_init_type, object_new, object_unref, qdev_prop_bool},
|
||||
c_str,
|
||||
cell::{self, BqlCell},
|
||||
declare_properties, define_property,
|
||||
prelude::*,
|
||||
qdev::{DeviceClass, DeviceImpl, DeviceState, Property, ResettablePhasesImpl},
|
||||
qom::{ClassInitImpl, ObjectImpl, ParentField},
|
||||
qdev::{DeviceImpl, DeviceState, Property, ResettablePhasesImpl},
|
||||
qom::{ObjectImpl, ParentField},
|
||||
sysbus::SysBusDevice,
|
||||
vmstate::VMStateDescription,
|
||||
zeroable::Zeroable,
|
||||
};
|
||||
|
@ -40,6 +41,12 @@ pub struct DummyClass {
|
|||
parent_class: <DeviceState as ObjectType>::Class,
|
||||
}
|
||||
|
||||
impl DummyClass {
|
||||
pub fn class_init<T: DeviceImpl>(self: &mut DummyClass) {
|
||||
self.parent_class.class_init::<T>();
|
||||
}
|
||||
}
|
||||
|
||||
declare_properties! {
|
||||
DUMMY_PROPERTIES,
|
||||
define_property!(
|
||||
|
@ -59,6 +66,7 @@ unsafe impl ObjectType for DummyState {
impl ObjectImpl for DummyState {
    type ParentType = DeviceState;
    const ABSTRACT: bool = false;
    const CLASS_INIT: fn(&mut DummyClass) = DummyClass::class_init::<Self>;
}

impl ResettablePhasesImpl for DummyState {}
@ -72,14 +80,6 @@ impl DeviceImpl for DummyState {
    }
}

// `impl<T> ClassInitImpl<DummyClass> for T` doesn't work since it violates
// orphan rule.
impl ClassInitImpl<DummyClass> for DummyState {
    fn class_init(klass: &mut DummyClass) {
        <Self as ClassInitImpl<DeviceClass>>::class_init(&mut klass.parent_class);
    }
}

#[derive(qemu_api_macros::offsets)]
#[repr(C)]
#[derive(qemu_api_macros::Object)]
@ -101,20 +101,15 @@ unsafe impl ObjectType for DummyChildState {
impl ObjectImpl for DummyChildState {
    type ParentType = DummyState;
    const ABSTRACT: bool = false;
    const CLASS_INIT: fn(&mut DummyChildClass) = DummyChildClass::class_init::<Self>;
}

impl ResettablePhasesImpl for DummyChildState {}
impl DeviceImpl for DummyChildState {}

impl ClassInitImpl<DummyClass> for DummyChildState {
    fn class_init(klass: &mut DummyClass) {
        <Self as ClassInitImpl<DeviceClass>>::class_init(&mut klass.parent_class);
    }
}

impl ClassInitImpl<DummyChildClass> for DummyChildState {
    fn class_init(klass: &mut DummyChildClass) {
        <Self as ClassInitImpl<DummyClass>>::class_init(&mut klass.parent_class);
impl DummyChildClass {
    pub fn class_init<T: DeviceImpl>(self: &mut DummyChildClass) {
        self.parent_class.class_init::<T>();
    }
}
@ -28,7 +28,7 @@ sub_file="${sub_tdir}/submodule.tar"
# different to the host OS.
subprojects="keycodemapdb libvfio-user berkeley-softfloat-3
berkeley-testfloat-3 arbitrary-int-1-rs bilge-0.2-rs
bilge-impl-0.2-rs either-1-rs itertools-0.11-rs proc-macro2-1-rs
bilge-impl-0.2-rs either-1-rs itertools-0.11-rs libc-0.2-rs proc-macro2-1-rs
proc-macro-error-1-rs proc-macro-error-attr-1-rs quote-1-rs
syn-2-rs unicode-ident-1-rs"
sub_deinit=""
@ -41,7 +41,7 @@ fi
# Only include wraps that are invoked with subproject()
SUBPROJECTS="libvfio-user keycodemapdb berkeley-softfloat-3
berkeley-testfloat-3 arbitrary-int-1-rs bilge-0.2-rs
bilge-impl-0.2-rs either-1-rs itertools-0.11-rs proc-macro2-1-rs
bilge-impl-0.2-rs either-1-rs itertools-0.11-rs libc-0.2-rs proc-macro2-1-rs
proc-macro-error-1-rs proc-macro-error-attr-1-rs quote-1-rs
syn-2-rs unicode-ident-1-rs"
@ -168,6 +168,7 @@ meson_options_help() {
printf "%s\n" ' pixman pixman support'
printf "%s\n" ' plugins TCG plugins via shared library loading'
printf "%s\n" ' png PNG support with libpng'
printf "%s\n" ' pvg macOS paravirtualized graphics support'
printf "%s\n" ' qatzip QATzip compression support'
printf "%s\n" ' qcow1 qcow1 image format support'
printf "%s\n" ' qed qed image format support'
@ -436,6 +437,8 @@ _meson_option_parse() {
--enable-png) printf "%s" -Dpng=enabled ;;
--disable-png) printf "%s" -Dpng=disabled ;;
--prefix=*) quote_sh "-Dprefix=$2" ;;
--enable-pvg) printf "%s" -Dpvg=enabled ;;
--disable-pvg) printf "%s" -Dpvg=disabled ;;
--enable-qatzip) printf "%s" -Dqatzip=enabled ;;
--disable-qatzip) printf "%s" -Dqatzip=disabled ;;
--enable-qcow1) printf "%s" -Dqcow1=enabled ;;
@ -62,7 +62,7 @@ if have_user
stub_ss.add(files('qdev.c'))
endif

stub_ss.add(files('monitor-fd.c'))
stub_ss.add(files('monitor-internal.c'))
endif

if have_system
@ -1,9 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "qemu/osdep.h"
#include "monitor/monitor.h"

int monitor_get_fd(Monitor *mon, const char *fdname, Error **errp)
{
    abort();
}

subprojects/.gitignore
@ -11,6 +11,7 @@
/bilge-impl-0.2.0
/either-1.12.0
/itertools-0.11.0
/libc-0.2.162
/proc-macro-error-1.0.4
/proc-macro-error-attr-1.0.4
/proc-macro2-1.0.84

subprojects/libc-0.2-rs.wrap (new file)
@ -0,0 +1,7 @@
[wrap-file]
directory = libc-0.2.162
source_url = https://crates.io/api/v1/crates/libc/0.2.162/download
source_filename = libc-0.2.162.tar.gz
source_hash = 18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398
#method = cargo
patch_directory = libc-0.2-rs

subprojects/packagefiles/libc-0.2-rs/meson.build (new file)
@ -0,0 +1,37 @@
project('libc-0.2-rs', 'rust',
  meson_version: '>=1.5.0',
  version: '0.2.162',
  license: 'MIT OR Apache-2.0',
  default_options: [])

_libc_rs = static_library(
  'libc',
  files('src/lib.rs'),
  gnu_symbol_visibility: 'hidden',
  override_options: ['rust_std=2015', 'build.rust_std=2015'],
  rust_abi: 'rust',
  rust_args: [
    '--cap-lints', 'allow',
    '--cfg', 'freebsd11',
    '--cfg', 'libc_priv_mod_use',
    '--cfg', 'libc_union',
    '--cfg', 'libc_const_size_of',
    '--cfg', 'libc_align',
    '--cfg', 'libc_int128',
    '--cfg', 'libc_core_cvoid',
    '--cfg', 'libc_packedN',
    '--cfg', 'libc_cfg_target_vendor',
    '--cfg', 'libc_non_exhaustive',
    '--cfg', 'libc_long_array',
    '--cfg', 'libc_ptr_addr_of',
    '--cfg', 'libc_underscore_const_names',
    '--cfg', 'libc_const_extern_fn',
  ],
  dependencies: [],
)

libc_dep = declare_dependency(
  link_with: _libc_rs,
)

meson.override_dependency('libc-0.2-rs', libc_dep)
@ -1882,7 +1882,11 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
    if (new_block->flags & RAM_GUEST_MEMFD) {
        int ret;

        assert(kvm_enabled());
        if (!kvm_enabled()) {
            error_setg(errp, "cannot set up private guest memory for %s: KVM required",
                       object_get_typename(OBJECT(current_machine->cgs)));
            goto out_free;
        }
        assert(new_block->guest_memfd < 0);

        ret = ram_block_discard_require(true);
@ -247,6 +247,9 @@ static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info,
    case CPU_TOPOLOGY_LEVEL_CORE:
        num_ids = 1 << apicid_core_offset(topo_info);
        break;
    case CPU_TOPOLOGY_LEVEL_MODULE:
        num_ids = 1 << apicid_module_offset(topo_info);
        break;
    case CPU_TOPOLOGY_LEVEL_DIE:
        num_ids = 1 << apicid_die_offset(topo_info);
        break;
@ -255,7 +258,7 @@ static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info,
        break;
    default:
        /*
         * Currently there is no use case for THREAD and MODULE, so use
         * Currently there is no use case for THREAD, so use
         * assert directly to facilitate debugging.
         */
        g_assert_not_reached();
@ -5495,6 +5498,130 @@ static const X86CPUDefinition builtin_x86_defs[] = {
|
|||
.model_id = "AMD EPYC-Genoa Processor",
|
||||
.cache_info = &epyc_genoa_cache_info,
|
||||
},
|
||||
{
|
||||
.name = "YongFeng",
|
||||
.level = 0x1F,
|
||||
.vendor = CPUID_VENDOR_ZHAOXIN1,
|
||||
.family = 7,
|
||||
.model = 11,
|
||||
.stepping = 3,
|
||||
/* missing: CPUID_HT, CPUID_TM, CPUID_PBE */
|
||||
.features[FEAT_1_EDX] =
|
||||
CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
||||
CPUID_ACPI | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
|
||||
CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
|
||||
CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
|
||||
CPUID_PSE | CPUID_DE | CPUID_VME | CPUID_FP87,
|
||||
/*
|
||||
* missing: CPUID_EXT_OSXSAVE, CPUID_EXT_XTPR, CPUID_EXT_TM2,
|
||||
* CPUID_EXT_EST, CPUID_EXT_SMX, CPUID_EXT_VMX
|
||||
*/
|
||||
.features[FEAT_1_ECX] =
|
||||
CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
|
||||
CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_TSC_DEADLINE_TIMER |
|
||||
CPUID_EXT_POPCNT | CPUID_EXT_MOVBE | CPUID_EXT_X2APIC |
|
||||
CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_PCID |
|
||||
CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
|
||||
CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
|
||||
.features[FEAT_7_0_EBX] =
|
||||
CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_ADX |
|
||||
CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_BMI2 |
|
||||
CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_BMI1 |
|
||||
CPUID_7_0_EBX_FSGSBASE,
|
||||
/* missing: CPUID_7_0_ECX_OSPKE */
|
||||
.features[FEAT_7_0_ECX] =
|
||||
CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_UMIP,
|
||||
.features[FEAT_7_0_EDX] =
|
||||
CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL,
|
||||
.features[FEAT_8000_0001_EDX] =
|
||||
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
|
||||
CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
||||
.features[FEAT_8000_0001_ECX] =
|
||||
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
|
||||
.features[FEAT_8000_0007_EDX] = CPUID_APM_INVTSC,
|
||||
/*
|
||||
* TODO: When the Linux kernel introduces other existing definitions
|
||||
* for this leaf, remember to update the definitions here.
|
||||
*/
|
||||
.features[FEAT_C000_0001_EDX] =
|
||||
CPUID_C000_0001_EDX_PMM_EN | CPUID_C000_0001_EDX_PMM |
|
||||
CPUID_C000_0001_EDX_PHE_EN | CPUID_C000_0001_EDX_PHE |
|
||||
CPUID_C000_0001_EDX_ACE2 |
|
||||
CPUID_C000_0001_EDX_XCRYPT_EN | CPUID_C000_0001_EDX_XCRYPT |
|
||||
CPUID_C000_0001_EDX_XSTORE_EN | CPUID_C000_0001_EDX_XSTORE,
|
||||
.features[FEAT_XSAVE] =
|
||||
CPUID_XSAVE_XSAVEOPT,
|
||||
.features[FEAT_ARCH_CAPABILITIES] =
|
||||
MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY |
|
||||
MSR_ARCH_CAP_MDS_NO | MSR_ARCH_CAP_PSCHANGE_MC_NO |
|
||||
MSR_ARCH_CAP_SSB_NO,
|
||||
.features[FEAT_VMX_PROCBASED_CTLS] =
|
||||
VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_HLT_EXITING |
|
||||
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_INVLPG_EXITING |
|
||||
VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING |
|
||||
VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR3_LOAD_EXITING |
|
||||
VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING |
|
||||
VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW |
|
||||
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_MOV_DR_EXITING |
|
||||
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
||||
VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
||||
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
||||
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
||||
/*
|
||||
* missing: VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING,
|
||||
* VMX_SECONDARY_EXEC_TSC_SCALING
|
||||
*/
|
||||
.features[FEAT_VMX_SECONDARY_CTLS] =
|
||||
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
||||
VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC |
|
||||
VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_ENABLE_VPID |
|
||||
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
||||
VMX_SECONDARY_EXEC_WBINVD_EXITING |
|
||||
VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
||||
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
||||
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
||||
VMX_SECONDARY_EXEC_RDRAND_EXITING |
|
||||
VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
||||
VMX_SECONDARY_EXEC_ENABLE_VMFUNC |
|
||||
VMX_SECONDARY_EXEC_SHADOW_VMCS |
|
||||
VMX_SECONDARY_EXEC_ENABLE_PML,
|
||||
.features[FEAT_VMX_PINBASED_CTLS] =
|
||||
VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING |
|
||||
VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER |
|
||||
VMX_PIN_BASED_POSTED_INTR,
|
||||
.features[FEAT_VMX_EXIT_CTLS] =
|
||||
VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE |
|
||||
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
||||
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT |
|
||||
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
||||
VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
||||
/* missing: VMX_VM_ENTRY_SMM, VMX_VM_ENTRY_DEACT_DUAL_MONITOR */
|
||||
.features[FEAT_VMX_ENTRY_CTLS] =
|
||||
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE |
|
||||
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
|
||||
VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
||||
/*
|
||||
* missing: MSR_VMX_MISC_ACTIVITY_SHUTDOWN,
|
||||
* MSR_VMX_MISC_ACTIVITY_WAIT_SIPI
|
||||
*/
|
||||
.features[FEAT_VMX_MISC] =
|
||||
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT |
|
||||
MSR_VMX_MISC_VMWRITE_VMEXIT,
|
||||
/* missing: MSR_VMX_EPT_UC */
|
||||
.features[FEAT_VMX_EPT_VPID_CAPS] =
|
||||
MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 |
|
||||
MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB |
|
||||
MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS |
|
||||
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
||||
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID |
|
||||
MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
||||
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
|
||||
.features[FEAT_VMX_BASIC] =
|
||||
MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS,
|
||||
.features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
|
||||
.xlevel = 0x80000008,
|
||||
.model_id = "Zhaoxin YongFeng Processor",
|
||||
},
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -7677,9 +7804,10 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
        /*
         * The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         * So don't set it here for Intel (and other processors
         * following Intel's behavior) to make Linux guests happy.
         */
        if (!IS_INTEL_CPU(env)) {
        if (!IS_INTEL_CPU(env) && !IS_ZHAOXIN_CPU(env)) {
            env->features[FEAT_8000_0001_ECX] |= CPUID_EXT3_CMP_LEG;
        }
    }
@ -7903,6 +8031,64 @@ static void x86_cpu_hyperv_realize(X86CPU *cpu)
    cpu->hyperv_limits[2] = 0;
}

#ifndef CONFIG_USER_ONLY
static bool x86_cpu_update_smp_cache_topo(MachineState *ms, X86CPU *cpu,
                                          Error **errp)
{
    CPUX86State *env = &cpu->env;
    CpuTopologyLevel level;

    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D);
    if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
        env->cache_info_cpuid4.l1d_cache->share_level = level;
        env->cache_info_amd.l1d_cache->share_level = level;
    } else {
        machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D,
            env->cache_info_cpuid4.l1d_cache->share_level);
        machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D,
            env->cache_info_amd.l1d_cache->share_level);
    }

    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I);
    if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
        env->cache_info_cpuid4.l1i_cache->share_level = level;
        env->cache_info_amd.l1i_cache->share_level = level;
    } else {
        machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I,
            env->cache_info_cpuid4.l1i_cache->share_level);
        machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I,
            env->cache_info_amd.l1i_cache->share_level);
    }

    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2);
    if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
        env->cache_info_cpuid4.l2_cache->share_level = level;
        env->cache_info_amd.l2_cache->share_level = level;
    } else {
        machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2,
            env->cache_info_cpuid4.l2_cache->share_level);
        machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2,
            env->cache_info_amd.l2_cache->share_level);
    }

    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3);
    if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
        env->cache_info_cpuid4.l3_cache->share_level = level;
        env->cache_info_amd.l3_cache->share_level = level;
    } else {
        machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3,
            env->cache_info_cpuid4.l3_cache->share_level);
        machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3,
            env->cache_info_amd.l3_cache->share_level);
    }

    if (!machine_check_smp_cache(ms, errp)) {
        return false;
    }
    return true;
}
#endif

static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
@ -8142,6 +8328,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    if (mc->smp_props.has_caches) {
        if (!x86_cpu_update_smp_cache_topo(ms, cpu, errp)) {
            return;
        }
    }

    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
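(Usage note, hedged: with this hook, the per-level cache sharing comes from the -machine smp-cache array property that machine_check_smp_cache() validates. The exact option spelling below follows the generic smp-cache syntax from the earlier machine-core series and is an assumption, not something shown in this diff.)

    qemu-system-x86_64 -machine q35,smp-cache.0.cache=l1d,smp-cache.0.topology=core,smp-cache.1.cache=l2,smp-cache.1.topology=module ...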
@ -1110,6 +1110,27 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)

/* "rng" RNG present (xstore) */
#define CPUID_C000_0001_EDX_XSTORE (1U << 2)
/* "rng_en" RNG enabled */
#define CPUID_C000_0001_EDX_XSTORE_EN (1U << 3)
/* "ace" on-CPU crypto (xcrypt) */
#define CPUID_C000_0001_EDX_XCRYPT (1U << 6)
/* "ace_en" on-CPU crypto enabled */
#define CPUID_C000_0001_EDX_XCRYPT_EN (1U << 7)
/* Advanced Cryptography Engine v2 */
#define CPUID_C000_0001_EDX_ACE2 (1U << 8)
/* ACE v2 enabled */
#define CPUID_C000_0001_EDX_ACE2_EN (1U << 9)
/* PadLock Hash Engine */
#define CPUID_C000_0001_EDX_PHE (1U << 10)
/* PHE enabled */
#define CPUID_C000_0001_EDX_PHE_EN (1U << 11)
/* PadLock Montgomery Multiplier */
#define CPUID_C000_0001_EDX_PMM (1U << 12)
/* PMM enabled */
#define CPUID_C000_0001_EDX_PMM_EN (1U << 13)

#define CPUID_VENDOR_SZ 12

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
@ -1122,7 +1143,16 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
#define CPUID_VENDOR_AMD "AuthenticAMD"

#define CPUID_VENDOR_VIA "CentaurHauls"
#define CPUID_VENDOR_ZHAOXIN1_1 0x746E6543 /* "Cent" */
#define CPUID_VENDOR_ZHAOXIN1_2 0x48727561 /* "aurH" */
#define CPUID_VENDOR_ZHAOXIN1_3 0x736C7561 /* "auls" */

#define CPUID_VENDOR_ZHAOXIN2_1 0x68532020 /* " Sh" */
#define CPUID_VENDOR_ZHAOXIN2_2 0x68676E61 /* "angh" */
#define CPUID_VENDOR_ZHAOXIN2_3 0x20206961 /* "ai " */

#define CPUID_VENDOR_ZHAOXIN1 "CentaurHauls"
#define CPUID_VENDOR_ZHAOXIN2 " Shanghai "

#define CPUID_VENDOR_HYGON "HygonGenuine"
@ -1132,6 +1162,15 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
#define IS_ZHAOXIN1_CPU(env) \
    ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN1_1 && \
     (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN1_2 && \
     (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN1_3)
#define IS_ZHAOXIN2_CPU(env) \
    ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN2_1 && \
     (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN2_2 && \
     (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN2_3)
#define IS_ZHAOXIN_CPU(env) (IS_ZHAOXIN1_CPU(env) || IS_ZHAOXIN2_CPU(env))

#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
@ -18,7 +18,9 @@

uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, int reg);

void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int);
void hvf_handle_io(CPUState *, uint16_t, void *, int, int, int);
void hvf_simulate_rdmsr(CPUX86State *env);
void hvf_simulate_wrmsr(CPUX86State *env);

/* Host specific functions */
int hvf_inject_interrupt(CPUArchState *env, int vector);
@ -61,6 +61,7 @@
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_flags.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
@ -103,7 +104,7 @@ static void update_apic_tpr(CPUState *cpu)

#define VECTORING_INFO_VECTOR_MASK 0xff

void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
void hvf_handle_io(CPUState *env, uint16_t port, void *buffer,
                   int direction, int size, int count)
{
    int i;
@ -434,6 +435,264 @@ static void hvf_cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
|||
}
|
||||
}
|
||||
|
||||
void hvf_load_regs(CPUState *cs)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
int i = 0;
|
||||
RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX);
|
||||
RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX);
|
||||
RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX);
|
||||
RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX);
|
||||
RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI);
|
||||
RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI);
|
||||
RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP);
|
||||
RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP);
|
||||
for (i = 8; i < 16; i++) {
|
||||
RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i);
|
||||
}
|
||||
|
||||
env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
|
||||
rflags_to_lflags(env);
|
||||
env->eip = rreg(cs->accel->fd, HV_X86_RIP);
|
||||
}
|
||||
|
||||
void hvf_store_regs(CPUState *cs)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
int i = 0;
|
||||
wreg(cs->accel->fd, HV_X86_RAX, RAX(env));
|
||||
wreg(cs->accel->fd, HV_X86_RBX, RBX(env));
|
||||
wreg(cs->accel->fd, HV_X86_RCX, RCX(env));
|
||||
wreg(cs->accel->fd, HV_X86_RDX, RDX(env));
|
||||
wreg(cs->accel->fd, HV_X86_RSI, RSI(env));
|
||||
wreg(cs->accel->fd, HV_X86_RDI, RDI(env));
|
||||
wreg(cs->accel->fd, HV_X86_RBP, RBP(env));
|
||||
wreg(cs->accel->fd, HV_X86_RSP, RSP(env));
|
||||
for (i = 8; i < 16; i++) {
|
||||
wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i));
|
||||
}
|
||||
|
||||
lflags_to_rflags(env);
|
||||
wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
|
||||
macvm_set_rip(cs, env->eip);
|
||||
}
|
||||
|
||||
void hvf_simulate_rdmsr(CPUX86State *env)
|
||||
{
|
||||
X86CPU *cpu = env_archcpu(env);
|
||||
CPUState *cs = env_cpu(env);
|
||||
uint32_t msr = ECX(env);
|
||||
uint64_t val = 0;
|
||||
|
||||
switch (msr) {
|
||||
case MSR_IA32_TSC:
|
||||
val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
|
||||
break;
|
||||
case MSR_IA32_APICBASE:
|
||||
val = cpu_get_apic_base(cpu->apic_state);
|
||||
break;
|
||||
case MSR_APIC_START ... MSR_APIC_END: {
|
||||
int ret;
|
||||
int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
|
||||
|
||||
ret = apic_msr_read(index, &val);
|
||||
if (ret < 0) {
|
||||
x86_emul_raise_exception(env, EXCP0D_GPF, 0);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case MSR_IA32_UCODE_REV:
|
||||
val = cpu->ucode_rev;
|
||||
break;
|
||||
case MSR_EFER:
|
||||
val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);
|
||||
break;
|
||||
case MSR_FSBASE:
|
||||
val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE);
|
||||
break;
|
||||
case MSR_GSBASE:
|
||||
val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE);
|
||||
break;
|
||||
case MSR_KERNELGSBASE:
|
||||
val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE);
|
||||
break;
|
||||
case MSR_STAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_LSTAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_CSTAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_IA32_MISC_ENABLE:
|
||||
val = env->msr_ia32_misc_enable;
|
||||
break;
|
||||
case MSR_MTRRphysBase(0):
|
||||
case MSR_MTRRphysBase(1):
|
||||
case MSR_MTRRphysBase(2):
|
||||
case MSR_MTRRphysBase(3):
|
||||
case MSR_MTRRphysBase(4):
|
||||
case MSR_MTRRphysBase(5):
|
||||
case MSR_MTRRphysBase(6):
|
||||
case MSR_MTRRphysBase(7):
|
||||
val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
|
||||
break;
|
||||
case MSR_MTRRphysMask(0):
|
||||
case MSR_MTRRphysMask(1):
|
||||
case MSR_MTRRphysMask(2):
|
||||
case MSR_MTRRphysMask(3):
|
||||
case MSR_MTRRphysMask(4):
|
||||
case MSR_MTRRphysMask(5):
|
||||
case MSR_MTRRphysMask(6):
|
||||
case MSR_MTRRphysMask(7):
|
||||
val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
|
||||
break;
|
||||
case MSR_MTRRfix64K_00000:
|
||||
val = env->mtrr_fixed[0];
|
||||
break;
|
||||
case MSR_MTRRfix16K_80000:
|
||||
case MSR_MTRRfix16K_A0000:
|
||||
val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
|
||||
break;
|
||||
case MSR_MTRRfix4K_C0000:
|
||||
case MSR_MTRRfix4K_C8000:
|
||||
case MSR_MTRRfix4K_D0000:
|
||||
case MSR_MTRRfix4K_D8000:
|
||||
case MSR_MTRRfix4K_E0000:
|
||||
case MSR_MTRRfix4K_E8000:
|
||||
case MSR_MTRRfix4K_F0000:
|
||||
case MSR_MTRRfix4K_F8000:
|
||||
val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
|
||||
break;
|
||||
case MSR_MTRRdefType:
|
||||
val = env->mtrr_deftype;
|
||||
break;
|
||||
case MSR_CORE_THREAD_COUNT:
|
||||
val = cpu_x86_get_msr_core_thread_count(cpu);
|
||||
break;
|
||||
default:
|
||||
/* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
|
||||
val = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
RAX(env) = (uint32_t)val;
|
||||
RDX(env) = (uint32_t)(val >> 32);
|
||||
}
|
||||
|
||||
void hvf_simulate_wrmsr(CPUX86State *env)
|
||||
{
|
||||
X86CPU *cpu = env_archcpu(env);
|
||||
CPUState *cs = env_cpu(env);
|
||||
uint32_t msr = ECX(env);
|
||||
uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);
|
||||
|
||||
switch (msr) {
|
||||
case MSR_IA32_TSC:
|
||||
break;
|
||||
case MSR_IA32_APICBASE: {
|
||||
int r;
|
||||
|
||||
r = cpu_set_apic_base(cpu->apic_state, data);
|
||||
if (r < 0) {
|
||||
x86_emul_raise_exception(env, EXCP0D_GPF, 0);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case MSR_APIC_START ... MSR_APIC_END: {
|
||||
int ret;
|
||||
int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
|
||||
|
||||
ret = apic_msr_write(index, data);
|
||||
if (ret < 0) {
|
||||
x86_emul_raise_exception(env, EXCP0D_GPF, 0);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case MSR_FSBASE:
|
||||
wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data);
|
||||
break;
|
||||
case MSR_GSBASE:
|
||||
wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data);
|
||||
break;
|
||||
case MSR_KERNELGSBASE:
|
||||
wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data);
|
||||
break;
|
||||
case MSR_STAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_LSTAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_CSTAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_EFER:
|
||||
/*printf("new efer %llx\n", EFER(cs));*/
|
||||
wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data);
|
||||
if (data & MSR_EFER_NXE) {
|
||||
hv_vcpu_invalidate_tlb(cs->accel->fd);
|
||||
}
|
||||
break;
|
||||
case MSR_MTRRphysBase(0):
|
||||
case MSR_MTRRphysBase(1):
|
||||
case MSR_MTRRphysBase(2):
|
||||
case MSR_MTRRphysBase(3):
|
||||
case MSR_MTRRphysBase(4):
|
||||
case MSR_MTRRphysBase(5):
|
||||
case MSR_MTRRphysBase(6):
|
||||
case MSR_MTRRphysBase(7):
|
||||
env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
|
||||
break;
|
||||
case MSR_MTRRphysMask(0):
|
||||
case MSR_MTRRphysMask(1):
|
||||
case MSR_MTRRphysMask(2):
|
||||
case MSR_MTRRphysMask(3):
|
||||
case MSR_MTRRphysMask(4):
|
||||
case MSR_MTRRphysMask(5):
|
||||
case MSR_MTRRphysMask(6):
|
||||
case MSR_MTRRphysMask(7):
|
||||
env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
|
||||
break;
|
||||
case MSR_MTRRfix64K_00000:
|
||||
env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
|
||||
break;
|
||||
case MSR_MTRRfix16K_80000:
|
||||
case MSR_MTRRfix16K_A0000:
|
||||
env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
|
||||
break;
|
||||
case MSR_MTRRfix4K_C0000:
|
||||
case MSR_MTRRfix4K_C8000:
|
||||
case MSR_MTRRfix4K_D0000:
|
||||
case MSR_MTRRfix4K_D8000:
|
||||
case MSR_MTRRfix4K_E0000:
|
||||
case MSR_MTRRfix4K_E8000:
|
||||
case MSR_MTRRfix4K_F0000:
|
||||
case MSR_MTRRfix4K_F8000:
|
||||
env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
|
||||
break;
|
||||
case MSR_MTRRdefType:
|
||||
env->mtrr_deftype = data;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Related to support known hypervisor interface */
|
||||
/* if (g_hypervisor_iface)
|
||||
g_hypervisor_iface->wrmsr_handler(cs, msr, data);
|
||||
|
||||
printf("write msr %llx\n", RCX(cs));*/
|
||||
}
|
||||
|
||||
int hvf_vcpu_exec(CPUState *cpu)
|
||||
{
|
||||
X86CPU *x86_cpu = X86_CPU(cpu);
|
||||
|
@ -517,10 +776,10 @@ int hvf_vcpu_exec(CPUState *cpu)
|
|||
if (ept_emulation_fault(slot, gpa, exit_qual)) {
|
||||
struct x86_decode decode;
|
||||
|
||||
load_regs(cpu);
|
||||
hvf_load_regs(cpu);
|
||||
decode_instruction(env, &decode);
|
||||
exec_instruction(env, &decode);
|
||||
store_regs(cpu);
|
||||
hvf_store_regs(cpu);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
@ -535,8 +794,8 @@ int hvf_vcpu_exec(CPUState *cpu)
|
|||
|
||||
if (!string && in) {
|
||||
uint64_t val = 0;
|
||||
load_regs(cpu);
|
||||
hvf_handle_io(env, port, &val, 0, size, 1);
|
||||
hvf_load_regs(cpu);
|
||||
hvf_handle_io(env_cpu(env), port, &val, 0, size, 1);
|
||||
if (size == 1) {
|
||||
AL(env) = val;
|
||||
} else if (size == 2) {
|
||||
|
@ -547,21 +806,21 @@ int hvf_vcpu_exec(CPUState *cpu)
|
|||
RAX(env) = (uint64_t)val;
|
||||
}
|
||||
env->eip += ins_len;
|
||||
store_regs(cpu);
|
||||
hvf_store_regs(cpu);
|
||||
break;
|
||||
} else if (!string && !in) {
|
||||
RAX(env) = rreg(cpu->accel->fd, HV_X86_RAX);
|
||||
hvf_handle_io(env, port, &RAX(env), 1, size, 1);
|
||||
hvf_handle_io(env_cpu(env), port, &RAX(env), 1, size, 1);
|
||||
macvm_set_rip(cpu, rip + ins_len);
|
||||
break;
|
||||
}
|
||||
struct x86_decode decode;
|
||||
|
||||
load_regs(cpu);
|
||||
hvf_load_regs(cpu);
|
||||
decode_instruction(env, &decode);
|
||||
assert(ins_len == decode.len);
|
||||
exec_instruction(env, &decode);
|
||||
store_regs(cpu);
|
||||
hvf_store_regs(cpu);
|
||||
|
||||
break;
|
||||
}
|
||||
|
@ -614,21 +873,21 @@ int hvf_vcpu_exec(CPUState *cpu)
|
|||
case EXIT_REASON_RDMSR:
|
||||
case EXIT_REASON_WRMSR:
|
||||
{
|
||||
load_regs(cpu);
|
||||
hvf_load_regs(cpu);
|
||||
if (exit_reason == EXIT_REASON_RDMSR) {
|
||||
simulate_rdmsr(env);
|
||||
hvf_simulate_rdmsr(env);
|
||||
} else {
|
||||
simulate_wrmsr(env);
|
||||
hvf_simulate_wrmsr(env);
|
||||
}
|
||||
env->eip += ins_len;
|
||||
store_regs(cpu);
|
||||
hvf_store_regs(cpu);
|
||||
break;
|
||||
}
|
||||
case EXIT_REASON_CR_ACCESS: {
|
||||
int cr;
|
||||
int reg;
|
||||
|
||||
load_regs(cpu);
|
||||
hvf_load_regs(cpu);
|
||||
cr = exit_qual & 15;
|
||||
reg = (exit_qual >> 8) & 15;
|
||||
|
||||
|
@ -656,16 +915,16 @@ int hvf_vcpu_exec(CPUState *cpu)
|
|||
abort();
|
||||
}
|
||||
env->eip += ins_len;
|
||||
store_regs(cpu);
|
||||
hvf_store_regs(cpu);
|
||||
break;
|
||||
}
|
||||
case EXIT_REASON_APIC_ACCESS: { /* TODO */
|
||||
struct x86_decode decode;
|
||||
|
||||
load_regs(cpu);
|
||||
hvf_load_regs(cpu);
|
||||
decode_instruction(env, &decode);
|
||||
exec_instruction(env, &decode);
|
||||
store_regs(cpu);
|
||||
hvf_store_regs(cpu);
|
||||
break;
|
||||
}
|
||||
case EXIT_REASON_TPR: {
|
||||
|
@ -674,7 +933,7 @@ int hvf_vcpu_exec(CPUState *cpu)
|
|||
}
|
||||
case EXIT_REASON_TASK_SWITCH: {
|
||||
uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
|
||||
x68_segment_selector sel = {.sel = exit_qual & 0xffff};
|
||||
x86_segment_selector sel = {.sel = exit_qual & 0xffff};
|
||||
vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
|
||||
vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
|
||||
& VMCS_INTR_T_MASK);
|
||||
|
|
|
@ -48,7 +48,7 @@

bool x86_read_segment_descriptor(CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel)
                                 x86_segment_selector sel)
{
    target_ulong base;
    uint32_t limit;
@ -78,7 +78,7 @@ bool x86_read_segment_descriptor(CPUState *cpu,

bool x86_write_segment_descriptor(CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel)
                                  x86_segment_selector sel)
{
    target_ulong base;
    uint32_t limit;
@ -183,7 +183,7 @@ static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
#define GDT_SEL 0
#define LDT_SEL 1

typedef struct x68_segment_selector {
typedef struct x86_segment_selector {
    union {
        uint16_t sel;
        struct {
@ -192,7 +192,7 @@ typedef struct x68_segment_selector {
            uint16_t index:13;
        };
    };
} __attribute__ ((__packed__)) x68_segment_selector;
} __attribute__ ((__packed__)) x86_segment_selector;

/* useful register access macros */
#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])
@ -250,10 +250,10 @@ typedef struct x68_segment_selector {
/* deal with GDT/LDT descriptors in memory */
bool x86_read_segment_descriptor(CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel);
                                 x86_segment_selector sel);
bool x86_write_segment_descriptor(CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel);
                                  x86_segment_selector sel);

bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);
@ -1893,6 +1893,16 @@ static void decode_prefix(CPUX86State *env, struct x86_decode *decode)
|
|||
}
|
||||
}
|
||||
|
||||
static struct x86_segment_descriptor get_cs_descriptor(CPUState *s)
|
||||
{
|
||||
struct vmx_segment vmx_cs;
|
||||
x86_segment_descriptor cs;
|
||||
vmx_read_segment_descriptor(s, &vmx_cs, R_CS);
|
||||
vmx_segment_to_x86_descriptor(s, &vmx_cs, &cs);
|
||||
|
||||
return cs;
|
||||
}
|
||||
|
||||
void set_addressing_size(CPUX86State *env, struct x86_decode *decode)
|
||||
{
|
||||
decode->addressing_size = -1;
|
||||
|
@ -1904,10 +1914,9 @@ void set_addressing_size(CPUX86State *env, struct x86_decode *decode)
|
|||
}
|
||||
} else if (!x86_is_long_mode(env_cpu(env))) {
|
||||
/* protected */
|
||||
struct vmx_segment cs;
|
||||
vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS);
|
||||
x86_segment_descriptor cs = get_cs_descriptor(env_cpu(env));
|
||||
/* check db */
|
||||
if ((cs.ar >> 14) & 1) {
|
||||
if (cs.db) {
|
||||
if (decode->addr_size_override) {
|
||||
decode->addressing_size = 2;
|
||||
} else {
|
||||
|
@ -1941,10 +1950,9 @@ void set_operand_size(CPUX86State *env, struct x86_decode *decode)
|
|||
}
|
||||
} else if (!x86_is_long_mode(env_cpu(env))) {
|
||||
/* protected */
|
||||
struct vmx_segment cs;
|
||||
vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS);
|
||||
x86_segment_descriptor cs = get_cs_descriptor(env_cpu(env));
|
||||
/* check db */
|
||||
if ((cs.ar >> 14) & 1) {
|
||||
if (cs.db) {
|
||||
if (decode->op_size_override) {
|
||||
decode->operand_size = 2;
|
||||
} else{
|
||||
|
|
|
@ -60,14 +60,14 @@ uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)
|
|||
return rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base);
|
||||
}
|
||||
|
||||
x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
|
||||
x86_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
|
||||
{
|
||||
x68_segment_selector sel;
|
||||
x86_segment_selector sel;
|
||||
sel.sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector);
|
||||
return sel;
|
||||
}
|
||||
|
||||
void vmx_write_segment_selector(CPUState *cpu, x68_segment_selector selector, X86Seg seg)
|
||||
void vmx_write_segment_selector(CPUState *cpu, x86_segment_selector selector, X86Seg seg)
|
||||
{
|
||||
wvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector, selector.sel);
|
||||
}
|
||||
|
@ -90,7 +90,7 @@ void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Se
|
|||
wvmcs(cpu->accel->fd, sf->ar_bytes, desc->ar);
|
||||
}
|
||||
|
||||
void x86_segment_descriptor_to_vmx(CPUState *cpu, x68_segment_selector selector,
|
||||
void x86_segment_descriptor_to_vmx(CPUState *cpu, x86_segment_selector selector,
|
||||
struct x86_segment_descriptor *desc,
|
||||
struct vmx_segment *vmx_desc)
|
||||
{
|
||||
|
|
|
@ -34,10 +34,10 @@ void vmx_read_segment_descriptor(CPUState *cpu,
|
|||
void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,
|
||||
enum X86Seg seg);
|
||||
|
||||
x68_segment_selector vmx_read_segment_selector(CPUState *cpu,
|
||||
x86_segment_selector vmx_read_segment_selector(CPUState *cpu,
|
||||
enum X86Seg seg);
|
||||
void vmx_write_segment_selector(CPUState *cpu,
|
||||
x68_segment_selector selector,
|
||||
x86_segment_selector selector,
|
||||
enum X86Seg seg);
|
||||
|
||||
uint64_t vmx_read_segment_base(CPUState *cpu, enum X86Seg seg);
|
||||
|
@ -45,7 +45,7 @@ void vmx_write_segment_base(CPUState *cpu, enum X86Seg seg,
|
|||
uint64_t base);
|
||||
|
||||
void x86_segment_descriptor_to_vmx(CPUState *cpu,
|
||||
x68_segment_selector selector,
|
||||
x86_segment_selector selector,
|
||||
struct x86_segment_descriptor *desc,
|
||||
struct vmx_segment *vmx_desc);
|
||||
|
||||
|
|
|
@ -44,9 +44,7 @@
|
|||
#include "x86_flags.h"
|
||||
#include "vmcs.h"
|
||||
#include "vmx.h"
|
||||
|
||||
void hvf_handle_io(CPUState *cs, uint16_t port, void *data,
|
||||
int direction, int size, uint32_t count);
|
||||
#include "hvf-i386.h"
|
||||
|
||||
#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
|
||||
{ \
|
||||
|
@ -663,8 +661,7 @@ static void exec_lods(CPUX86State *env, struct x86_decode *decode)
|
|||
env->eip += decode->len;
|
||||
}
|
||||
|
||||
static void raise_exception(CPUX86State *env, int exception_index,
|
||||
int error_code)
|
||||
void x86_emul_raise_exception(CPUX86State *env, int exception_index, int error_code)
|
||||
{
|
||||
env->exception_nr = exception_index;
|
||||
env->error_code = error_code;
|
||||
|
@ -672,227 +669,15 @@ static void raise_exception(CPUX86State *env, int exception_index,
|
|||
env->exception_injected = 1;
|
||||
}
|
||||
|
||||
void simulate_rdmsr(CPUX86State *env)
|
||||
{
|
||||
X86CPU *cpu = env_archcpu(env);
|
||||
CPUState *cs = env_cpu(env);
|
||||
uint32_t msr = ECX(env);
|
||||
uint64_t val = 0;
|
||||
|
||||
switch (msr) {
|
||||
case MSR_IA32_TSC:
|
||||
val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
|
||||
break;
|
||||
case MSR_IA32_APICBASE:
|
||||
val = cpu_get_apic_base(cpu->apic_state);
|
||||
break;
|
||||
case MSR_APIC_START ... MSR_APIC_END: {
|
||||
int ret;
|
||||
int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
|
||||
|
||||
ret = apic_msr_read(index, &val);
|
||||
if (ret < 0) {
|
||||
raise_exception(env, EXCP0D_GPF, 0);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case MSR_IA32_UCODE_REV:
|
||||
val = cpu->ucode_rev;
|
||||
break;
|
||||
case MSR_EFER:
|
||||
val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);
|
||||
break;
|
||||
case MSR_FSBASE:
|
||||
val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE);
|
||||
break;
|
||||
case MSR_GSBASE:
|
||||
val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE);
|
||||
break;
|
||||
case MSR_KERNELGSBASE:
|
||||
val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE);
|
||||
break;
|
||||
case MSR_STAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_LSTAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_CSTAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_IA32_MISC_ENABLE:
|
||||
val = env->msr_ia32_misc_enable;
|
||||
break;
|
||||
case MSR_MTRRphysBase(0):
|
||||
case MSR_MTRRphysBase(1):
|
||||
case MSR_MTRRphysBase(2):
|
||||
case MSR_MTRRphysBase(3):
|
||||
case MSR_MTRRphysBase(4):
|
||||
case MSR_MTRRphysBase(5):
|
||||
case MSR_MTRRphysBase(6):
|
||||
case MSR_MTRRphysBase(7):
|
||||
val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
|
||||
break;
|
||||
case MSR_MTRRphysMask(0):
|
||||
case MSR_MTRRphysMask(1):
|
||||
case MSR_MTRRphysMask(2):
|
||||
case MSR_MTRRphysMask(3):
|
||||
case MSR_MTRRphysMask(4):
|
||||
case MSR_MTRRphysMask(5):
|
||||
case MSR_MTRRphysMask(6):
|
||||
case MSR_MTRRphysMask(7):
|
||||
val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
|
||||
break;
|
||||
case MSR_MTRRfix64K_00000:
|
||||
val = env->mtrr_fixed[0];
|
||||
break;
|
||||
case MSR_MTRRfix16K_80000:
|
||||
case MSR_MTRRfix16K_A0000:
|
||||
val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
|
||||
break;
|
||||
case MSR_MTRRfix4K_C0000:
|
||||
case MSR_MTRRfix4K_C8000:
|
||||
case MSR_MTRRfix4K_D0000:
|
||||
case MSR_MTRRfix4K_D8000:
|
||||
case MSR_MTRRfix4K_E0000:
|
||||
case MSR_MTRRfix4K_E8000:
|
||||
case MSR_MTRRfix4K_F0000:
|
||||
case MSR_MTRRfix4K_F8000:
|
||||
val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
|
||||
break;
|
||||
case MSR_MTRRdefType:
|
||||
val = env->mtrr_deftype;
|
||||
break;
|
||||
case MSR_CORE_THREAD_COUNT:
|
||||
val = cpu_x86_get_msr_core_thread_count(cpu);
|
||||
break;
|
||||
default:
|
||||
/* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
|
||||
val = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
RAX(env) = (uint32_t)val;
|
||||
RDX(env) = (uint32_t)(val >> 32);
|
||||
}
|
||||
|
||||
static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode)
|
||||
{
|
||||
simulate_rdmsr(env);
|
||||
hvf_simulate_rdmsr(env);
|
||||
env->eip += decode->len;
|
||||
}
|
||||
|
||||
void simulate_wrmsr(CPUX86State *env)
|
||||
{
|
||||
X86CPU *cpu = env_archcpu(env);
|
||||
CPUState *cs = env_cpu(env);
|
||||
uint32_t msr = ECX(env);
|
||||
uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);
|
||||
|
||||
switch (msr) {
|
||||
case MSR_IA32_TSC:
|
||||
break;
|
||||
case MSR_IA32_APICBASE: {
|
||||
int r;
|
||||
|
||||
r = cpu_set_apic_base(cpu->apic_state, data);
|
||||
if (r < 0) {
|
||||
raise_exception(env, EXCP0D_GPF, 0);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case MSR_APIC_START ... MSR_APIC_END: {
|
||||
int ret;
|
||||
int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
|
||||
|
||||
ret = apic_msr_write(index, data);
|
||||
if (ret < 0) {
|
||||
raise_exception(env, EXCP0D_GPF, 0);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case MSR_FSBASE:
|
||||
wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data);
|
||||
break;
|
||||
case MSR_GSBASE:
|
||||
wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data);
|
||||
break;
|
||||
case MSR_KERNELGSBASE:
|
||||
wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data);
|
||||
break;
|
||||
case MSR_STAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_LSTAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_CSTAR:
|
||||
abort();
|
||||
break;
|
||||
case MSR_EFER:
|
||||
/*printf("new efer %llx\n", EFER(cs));*/
|
||||
wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data);
|
||||
if (data & MSR_EFER_NXE) {
|
||||
hv_vcpu_invalidate_tlb(cs->accel->fd);
|
||||
}
|
||||
break;
|
||||
case MSR_MTRRphysBase(0):
|
||||
case MSR_MTRRphysBase(1):
|
||||
case MSR_MTRRphysBase(2):
|
||||
case MSR_MTRRphysBase(3):
|
||||
case MSR_MTRRphysBase(4):
|
||||
case MSR_MTRRphysBase(5):
|
||||
case MSR_MTRRphysBase(6):
|
||||
case MSR_MTRRphysBase(7):
|
||||
env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
|
||||
break;
|
||||
case MSR_MTRRphysMask(0):
|
||||
case MSR_MTRRphysMask(1):
|
||||
case MSR_MTRRphysMask(2):
|
||||
case MSR_MTRRphysMask(3):
|
||||
case MSR_MTRRphysMask(4):
|
||||
case MSR_MTRRphysMask(5):
|
||||
case MSR_MTRRphysMask(6):
|
||||
case MSR_MTRRphysMask(7):
|
||||
env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
|
||||
break;
|
||||
case MSR_MTRRfix64K_00000:
|
||||
env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
|
||||
break;
|
||||
case MSR_MTRRfix16K_80000:
|
||||
case MSR_MTRRfix16K_A0000:
|
||||
env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
|
||||
break;
|
||||
case MSR_MTRRfix4K_C0000:
|
||||
case MSR_MTRRfix4K_C8000:
|
||||
case MSR_MTRRfix4K_D0000:
|
||||
case MSR_MTRRfix4K_D8000:
|
||||
case MSR_MTRRfix4K_E0000:
|
||||
case MSR_MTRRfix4K_E8000:
|
||||
case MSR_MTRRfix4K_F0000:
|
||||
case MSR_MTRRfix4K_F8000:
|
||||
env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
|
||||
break;
|
||||
case MSR_MTRRdefType:
|
||||
env->mtrr_deftype = data;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Related to support known hypervisor interface */
|
||||
/* if (g_hypervisor_iface)
|
||||
g_hypervisor_iface->wrmsr_handler(cs, msr, data);
|
||||
|
||||
printf("write msr %llx\n", RCX(cs));*/
|
||||
}
|
||||
|
||||
static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode)
|
||||
{
|
||||
simulate_wrmsr(env);
|
||||
hvf_simulate_wrmsr(env);
|
||||
env->eip += decode->len;
|
||||
}
|
||||
|
||||
|
@ -1454,58 +1239,8 @@ static void init_cmd_handler(void)
|
|||
}
|
||||
}
|
||||
|
||||
void load_regs(CPUState *cs)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
int i = 0;
|
||||
RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX);
|
||||
RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX);
|
||||
RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX);
|
||||
RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX);
|
||||
RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI);
|
||||
RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI);
|
||||
RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP);
|
||||
RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP);
|
||||
for (i = 8; i < 16; i++) {
|
||||
RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i);
|
||||
}
|
||||
|
||||
env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
|
||||
rflags_to_lflags(env);
|
||||
env->eip = rreg(cs->accel->fd, HV_X86_RIP);
|
||||
}
|
||||
|
||||
void store_regs(CPUState *cs)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
int i = 0;
|
||||
wreg(cs->accel->fd, HV_X86_RAX, RAX(env));
|
||||
wreg(cs->accel->fd, HV_X86_RBX, RBX(env));
|
||||
wreg(cs->accel->fd, HV_X86_RCX, RCX(env));
|
||||
wreg(cs->accel->fd, HV_X86_RDX, RDX(env));
|
||||
wreg(cs->accel->fd, HV_X86_RSI, RSI(env));
|
||||
wreg(cs->accel->fd, HV_X86_RDI, RDI(env));
|
||||
wreg(cs->accel->fd, HV_X86_RBP, RBP(env));
|
||||
wreg(cs->accel->fd, HV_X86_RSP, RSP(env));
|
||||
for (i = 8; i < 16; i++) {
|
||||
wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i));
|
||||
}
|
||||
|
||||
lflags_to_rflags(env);
|
||||
wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
|
||||
macvm_set_rip(cs, env->eip);
|
||||
}
|
||||
|
||||
bool exec_instruction(CPUX86State *env, struct x86_decode *ins)
|
||||
{
|
||||
/*if (hvf_vcpu_id(cs))
|
||||
printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cs), env->eip,
|
||||
decode_cmd_to_string(ins->cmd));*/
|
||||
|
||||
if (!_cmd_handler[ins->cmd].handler) {
|
||||
printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip,
|
||||
ins->cmd, ins->opcode[0],
|
||||
|
|
|
@ -25,12 +25,7 @@

void init_emu(void);
bool exec_instruction(CPUX86State *env, struct x86_decode *ins);

void load_regs(CPUState *cpu);
void store_regs(CPUState *cpu);

void simulate_rdmsr(CPUX86State *env);
void simulate_wrmsr(CPUX86State *env);
void x86_emul_raise_exception(CPUX86State *env, int exception_index, int error_code);

target_ulong read_reg(CPUX86State *env, int reg, int size);
void write_reg(CPUX86State *env, int reg, target_ulong val, int size);
@ -76,16 +76,16 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
|
|||
RSI(env) = tss->esi;
|
||||
RDI(env) = tss->edi;
|
||||
|
||||
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
|
||||
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
|
||||
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
|
||||
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
|
||||
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
|
||||
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
|
||||
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
|
||||
vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ldt}}, R_LDTR);
|
||||
vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->es}}, R_ES);
|
||||
vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->cs}}, R_CS);
|
||||
vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ss}}, R_SS);
|
||||
vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ds}}, R_DS);
|
||||
vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->fs}}, R_FS);
|
||||
vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->gs}}, R_GS);
|
||||
}
|
||||
|
||||
static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
|
||||
static int task_switch_32(CPUState *cpu, x86_segment_selector tss_sel, x86_segment_selector old_tss_sel,
|
||||
uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
|
||||
{
|
||||
struct x86_tss_segment32 tss_seg;
|
||||
|
@ -108,7 +108,7 @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme
|
|||
return 0;
|
||||
}
|
||||
|
||||
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
|
||||
void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
|
||||
{
|
||||
uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP);
|
||||
if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
|
||||
|
@ -119,10 +119,10 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
|
|||
return;
|
||||
}
|
||||
|
||||
load_regs(cpu);
|
||||
hvf_load_regs(cpu);
|
||||
|
||||
struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
|
||||
x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
|
||||
x86_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
|
||||
uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
|
||||
uint32_t desc_limit;
|
||||
struct x86_call_gate task_gate_desc;
|
||||
|
@ -140,7 +140,7 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
|
|||
x86_read_call_gate(cpu, &task_gate_desc, gate);
|
||||
|
||||
dpl = task_gate_desc.dpl;
|
||||
x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
|
||||
x86_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
|
||||
if (tss_sel.rpl > dpl || cs.rpl > dpl)
|
||||
;//DPRINTF("emulate_gp");
|
||||
}
|
||||
|
@ -178,7 +178,7 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
|
|||
x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
|
||||
vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);
|
||||
|
||||
store_regs(cpu);
|
||||
hvf_store_regs(cpu);
|
||||
|
||||
hv_vcpu_invalidate_tlb(cpu->accel->fd);
|
||||
}
|
||||
|
|
|
@ -15,6 +15,6 @@
#ifndef HVF_X86_TASK_H
#define HVF_X86_TASK_H

void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel,
                            int reason, bool gate_valid, uint8_t gate, uint64_t gate_type);
#endif
@ -31,4 +31,7 @@ void hvf_get_xsave(CPUState *cs);
void hvf_get_msrs(CPUState *cs);
void vmx_clear_int_window_exiting(CPUState *cs);
void vmx_update_tpr(CPUState *cs);

void hvf_load_regs(CPUState *cpu);
void hvf_store_regs(CPUState *cpu);
#endif
@ -700,13 +700,6 @@ static void rv128_base_cpu_init(Object *obj)
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
@ -3051,15 +3044,6 @@ void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
}
#endif

#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
@ -1014,6 +1014,7 @@ static bool riscv_cpu_is_generic(Object *cpu_obj)
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
@ -1022,6 +1023,14 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
        return false;
    }

    if (mcc->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_setg(errp,
                   "128-bit RISC-V currently does not work with Multi "
                   "Threaded TCG. Please use: -accel tcg,thread=single");
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;