Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging

* meson: small old patches (one from 2022)
* rust: pl011: forward port some changes from C version
* target/i386: small improvements to TCG emulation
* target/i386: HVF emulation cleanups
* target/i386: add its_no feature
* cs4231a: fix assertion failure
* update Linux headers

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCgAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmgiRh0UHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroMnKggAjKQU110WwAfC3HODcqIvFoLIrFOX
# zCtrAUNvqFvI917yBsBH0rHghsGnBE260zbo53Fn5SpHtMLsnpelk+PVV3A9gLB8
# 9NHfRdGm+n+nBjEZE/dYi3dU6Fk7/OBjp/TP7amC3T7XiG12zoAQdPZQb0oadXkA
# xdXgtWlztYeySn7v9QcStJrgGHYysopawZEQDO8m19DGHnPs0XmznXI1O4689DJU
# ERNITIBK7qxv3efBtrci3iBgibzR70vw6yityK0a01ml5EdABeEFHfVGGkrO+B2U
# ssPMIfmbf9QupADwBS+D1V21WTGla7e0FRAM21UJH93738QCCYjr9nv9qQ==
# =7K+B
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 12 May 2025 15:03:57 EDT
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu:
  target/i386: Make ITS_NO available to guests
  hw/audio/cs4231a: fix assertion error in isa_bus_get_irq
  linux-headers: update from 6.15 + kvm/next
  target/i386: remove lflags
  target/i386/emulate: mostly rewrite flags handling
  target/i386/emulate: stop overloading decode->op[N].ptr
  target/i386: implement TSS trap bit
  target/i386: move push of error code to switch_tss_ra
  target/i386: list TCG-supported features for CPUID[80000021h].EAX
  target/i386: ignore misplaced REX prefixes
  rust: pl011: Really use RX FIFO depth
  rust: pl011: Rename RX FIFO methods
  modinfo: lookup compile_commands.json by object
  meson: remove unnecessary dependencies from specific_ss
  meson: do not check supported TCG architecture if no emulators built
  meson: drop --enable-avx* options

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2025-05-14 07:16:35 -04:00
commit cacb211471
22 changed files with 423 additions and 342 deletions

View file

@ -1,4 +1,4 @@
if not get_option('tcg').allowed()
if not have_tcg
subdir_done()
endif

View file

@ -119,7 +119,7 @@ QEMU includes four crates:
for the ``hw/char/pl011.c`` and ``hw/timer/hpet.c`` files.
.. [#issues] The ``pl011`` crate is synchronized with ``hw/char/pl011.c``
as of commit 02b1f7f61928. The ``hpet`` crate is synchronized as of
as of commit 3e0f118f82. The ``hpet`` crate is synchronized as of
commit 1433e38cc8. Both are lacking tracing functionality.
This section explains how to work with them.

View file

@ -682,6 +682,10 @@ static void cs4231a_realizefn (DeviceState *dev, Error **errp)
return;
}
if (s->irq >= ISA_NUM_IRQS) {
error_setg(errp, "Invalid IRQ %d (max %d)", s->irq, ISA_NUM_IRQS - 1);
return;
}
s->pic = isa_bus_get_irq(bus, s->irq);
k = ISADMA_GET_CLASS(s->isa_dma);
k->register_channel(s->isa_dma, s->dma, cs_dma_read, s);

View file

@ -439,6 +439,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8)
#define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
@ -928,4 +929,74 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_SNP_VM 4
#define KVM_X86_TDX_VM 5
/* Trust Domain eXtension sub-ioctl() commands. */
enum kvm_tdx_cmd_id {
KVM_TDX_CAPABILITIES = 0,
KVM_TDX_INIT_VM,
KVM_TDX_INIT_VCPU,
KVM_TDX_INIT_MEM_REGION,
KVM_TDX_FINALIZE_VM,
KVM_TDX_GET_CPUID,
KVM_TDX_CMD_NR_MAX,
};
struct kvm_tdx_cmd {
/* enum kvm_tdx_cmd_id */
__u32 id;
/* flags for sub-command. If sub-command doesn't use this, set zero. */
__u32 flags;
/*
* data for each sub-command. An immediate or a pointer to the actual
* data in process virtual address. If sub-command doesn't use it,
* set zero.
*/
__u64 data;
/*
* Auxiliary error code. The sub-command may return TDX SEAMCALL
* status code in addition to -Exxx.
*/
__u64 hw_error;
};
struct kvm_tdx_capabilities {
__u64 supported_attrs;
__u64 supported_xfam;
__u64 reserved[254];
/* Configurable CPUID bits for userspace */
struct kvm_cpuid2 cpuid;
};
struct kvm_tdx_init_vm {
__u64 attributes;
__u64 xfam;
__u64 mrconfigid[6]; /* sha384 digest */
__u64 mrowner[6]; /* sha384 digest */
__u64 mrownerconfig[6]; /* sha384 digest */
/* The total space for TD_PARAMS before the CPUIDs is 256 bytes */
__u64 reserved[12];
/*
* Call KVM_TDX_INIT_VM before vcpu creation, thus before
* KVM_SET_CPUID2.
* This configuration supersedes KVM_SET_CPUID2s for VCPUs because the
* TDX module directly virtualizes those CPUIDs without VMM. The user
* space VMM, e.g. qemu, should make KVM_SET_CPUID2 consistent with
* those values. If it doesn't, KVM may have wrong idea of vCPUIDs of
* the guest, and KVM may wrongly emulate CPUIDs or MSRs that the TDX
* module doesn't virtualize.
*/
struct kvm_cpuid2 cpuid;
};
#define KVM_TDX_MEASURE_MEMORY_REGION _BITULL(0)
struct kvm_tdx_init_mem_region {
__u64 source_addr;
__u64 gpa;
__u64 nr_pages;
};
#endif /* _ASM_X86_KVM_H */
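
The comment in struct kvm_tdx_init_vm above pins down an ordering: KVM_TDX_INIT_VM has to be issued before any vCPU is created, and whatever is later passed to KVM_SET_CPUID2 must stay consistent with it. A minimal sketch of that ordering in C, assuming (by analogy with other confidential-computing commands, not stated in this header) that the TDX sub-commands are carried by the KVM_MEMORY_ENCRYPT_OP ioctl on the VM file descriptor; error handling and the CPUID plumbing are left out:

/*
 * Hypothetical sketch, not QEMU code. Assumes struct kvm_tdx_cmd is
 * passed through KVM_MEMORY_ENCRYPT_OP; check the KVM documentation
 * for the authoritative interface.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int tdx_vm_command(int vm_fd, __u32 id, void *data)
{
    struct kvm_tdx_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));
    cmd.id = id;                            /* enum kvm_tdx_cmd_id */
    cmd.data = (__u64)(unsigned long)data;  /* pointer or immediate */
    return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}

/*
 * Order required by the header comment:
 *   1. tdx_vm_command(vm_fd, KVM_TDX_INIT_VM, &init_vm);  -- before any vCPU
 *   2. KVM_CREATE_VCPU, then KVM_SET_CPUID2 with values consistent with
 *      init_vm.cpuid, because the TDX module virtualizes those CPUIDs
 *      without involving the VMM.
 */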

View file

@ -369,6 +369,7 @@ struct kvm_run {
#define KVM_SYSTEM_EVENT_WAKEUP 4
#define KVM_SYSTEM_EVENT_SUSPEND 5
#define KVM_SYSTEM_EVENT_SEV_TERM 6
#define KVM_SYSTEM_EVENT_TDX_FATAL 7
__u32 type;
__u32 ndata;
union {

View file

@ -247,6 +247,8 @@ have_vhost_net_vdpa = have_vhost_vdpa and get_option('vhost_net').allowed()
have_vhost_net_kernel = have_vhost_kernel and get_option('vhost_net').allowed()
have_vhost_net = have_vhost_net_kernel or have_vhost_net_user or have_vhost_net_vdpa
have_tcg = get_option('tcg').allowed() and (have_system or have_user)
have_tools = get_option('tools') \
.disable_auto_if(not have_system) \
.allowed()
@ -863,7 +865,7 @@ elif host_os == 'haiku'
cc.find_library('network'),
cc.find_library('bsd')]
elif host_os == 'openbsd'
if get_option('tcg').allowed() and target_dirs.length() > 0
if have_tcg
# Disable OpenBSD W^X if available
emulator_link_args = cc.get_supported_link_arguments('-Wl,-z,wxneeded')
endif
@ -904,7 +906,7 @@ if host_os == 'netbsd'
endif
tcg_arch = host_arch
if get_option('tcg').allowed()
if have_tcg
if host_arch == 'unknown'
if not get_option('tcg_interpreter')
error('Unsupported CPU @0@, try --enable-tcg-interpreter'.format(cpu))
@ -2534,7 +2536,7 @@ config_host_data.set('CONFIG_PIXMAN', pixman.found())
config_host_data.set('CONFIG_SLIRP', slirp.found())
config_host_data.set('CONFIG_SNAPPY', snappy.found())
config_host_data.set('CONFIG_SOLARIS', host_os == 'sunos')
if get_option('tcg').allowed()
if have_tcg
config_host_data.set('CONFIG_TCG', 1)
config_host_data.set('CONFIG_TCG_INTERPRETER', tcg_arch == 'tci')
endif
@ -3097,22 +3099,16 @@ config_host_data.set('CONFIG_ASM_HWPROBE_H',
cc.has_header_symbol('asm/hwprobe.h',
'RISCV_HWPROBE_EXT_ZBA'))
config_host_data.set('CONFIG_AVX2_OPT', get_option('avx2') \
.require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX2') \
.require(cc.links('''
#include <cpuid.h>
if have_cpuid_h
have_avx2 = cc.links('''
#include <immintrin.h>
static int __attribute__((target("avx2"))) bar(void *a) {
__m256i x = *(__m256i *)a;
return _mm256_testz_si256(x, x);
}
int main(int argc, char *argv[]) { return bar(argv[argc - 1]); }
'''), error_message: 'AVX2 not available').allowed())
config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \
.require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX512BW') \
.require(cc.links('''
#include <cpuid.h>
''')
have_avx512bw = cc.links('''
#include <immintrin.h>
static int __attribute__((target("avx512bw"))) bar(void *a) {
__m512i *x = a;
@ -3120,7 +3116,21 @@ config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \
return res[1];
}
int main(int argc, char *argv[]) { return bar(argv[0]); }
'''), error_message: 'AVX512BW not available').allowed())
''')
if get_option('x86_version') >= '3' and not have_avx2
error('Cannot enable AVX optimizations due to missing intrinsics')
elif get_option('x86_version') >= '4' and not have_avx512bw
error('Cannot enable AVX512 optimizations due to missing intrinsics')
endif
else
have_avx2 = false
have_avx512bw = false
if get_option('x86_version') >= '3'
error('Cannot enable AVX optimizations due to missing cpuid.h')
endif
endif
config_host_data.set('CONFIG_AVX2_OPT', have_avx2)
config_host_data.set('CONFIG_AVX512BW_OPT', have_avx512bw)
# For both AArch64 and AArch32, detect if builtins are available.
config_host_data.set('CONFIG_ARM_AES_BUILTIN', cc.compiles('''
@ -3893,16 +3903,11 @@ foreach d, list : modules
install: true,
install_dir: qemu_moddir)
if module_ss.sources() != []
# FIXME: Should use sl.extract_all_objects(recursive: true) as
# input. Sources can be used multiple times but objects are
# unique when it comes to lookup in compile_commands.json.
# Depnds on a mesion version with
# https://github.com/mesonbuild/meson/pull/8900
modinfo_files += custom_target(d + '-' + m + '.modinfo',
output: d + '-' + m + '.modinfo',
input: module_ss.sources() + genh,
input: sl.extract_all_objects(recursive: true),
capture: true,
command: [modinfo_collect, module_ss.sources()])
command: [modinfo_collect, '@INPUT@'])
endif
else
if d == 'block'
@ -3941,12 +3946,11 @@ foreach d, list : target_modules
dependencies: target_module_ss.dependencies(),
install: true,
install_dir: qemu_moddir)
# FIXME: Should use sl.extract_all_objects(recursive: true) too.
modinfo_files += custom_target(module_name + '.modinfo',
output: module_name + '.modinfo',
input: target_module_ss.sources() + genh,
input: sl.extract_all_objects(recursive: true),
capture: true,
command: [modinfo_collect, '--target', target, target_module_ss.sources()])
command: [modinfo_collect, '--target', target, '@INPUT@'])
endif
endif
endforeach
@ -4951,7 +4955,7 @@ if host_arch == 'unknown'
message('compile or work on this host CPU. You can help by volunteering')
message('to maintain it and providing a build host for our continuous')
message('integration setup.')
if get_option('tcg').allowed() and target_dirs.length() > 0
if have_tcg
message()
message('configure has succeeded and you can continue to build, but')
message('QEMU will use a slow interpreter to emulate the target CPU.')

View file

@ -123,10 +123,6 @@ option('valgrind', type : 'feature', value: 'auto',
option('membarrier', type: 'feature', value: 'disabled',
description: 'membarrier system call (for Linux 4.14+ or Windows')
option('avx2', type: 'feature', value: 'auto',
description: 'AVX2 optimizations')
option('avx512bw', type: 'feature', value: 'auto',
description: 'AVX512BW optimizations')
option('keyring', type: 'feature', value: 'auto',
description: 'Linux keyring support')
option('libkeyutils', type: 'feature', value: 'auto',

View file

@ -329,7 +329,7 @@ impl PL011Registers {
// hardware flow-control is enabled.
//
// For simplicity, the above described is not emulated.
self.loopback_enabled() && self.put_fifo(value)
self.loopback_enabled() && self.fifo_rx_put(value)
}
#[must_use]
@ -439,7 +439,7 @@ impl PL011Registers {
}
#[must_use]
pub fn put_fifo(&mut self, value: registers::Data) -> bool {
pub fn fifo_rx_put(&mut self, value: registers::Data) -> bool {
let depth = self.fifo_depth();
assert!(depth > 0);
let slot = (self.read_pos + self.read_count) & (depth - 1);
@ -580,19 +580,26 @@ impl PL011State {
fn can_receive(&self) -> u32 {
let regs = self.regs.borrow();
// trace_pl011_can_receive(s->lcr, s->read_count, r);
u32::from(regs.read_count < regs.fifo_depth())
regs.fifo_depth() - regs.read_count
}
fn receive(&self, buf: &[u8]) {
if buf.is_empty() {
let mut regs = self.regs.borrow_mut();
if regs.loopback_enabled() {
// In loopback mode, the RX input signal is internally disconnected
// from the entire receiving logics; thus, all inputs are ignored,
// and BREAK detection on RX input signal is also not performed.
return;
}
let mut regs = self.regs.borrow_mut();
let c: u32 = buf[0].into();
let update_irq = !regs.loopback_enabled() && regs.put_fifo(c.into());
let mut update_irq = false;
for &c in buf {
let c: u32 = c.into();
update_irq |= regs.fifo_rx_put(c.into());
}
// Release the BqlRefCell before calling self.update()
drop(regs);
if update_irq {
self.update();
}
@ -602,7 +609,7 @@ impl PL011State {
let mut update_irq = false;
let mut regs = self.regs.borrow_mut();
if event == Event::CHR_EVENT_BREAK && !regs.loopback_enabled() {
update_irq = regs.put_fifo(registers::Data::BREAK);
update_irq = regs.fifo_rx_put(registers::Data::BREAK);
}
// Release the BqlRefCell before calling self.update()
drop(regs);

View file

@ -97,8 +97,6 @@ meson_options_help() {
printf "%s\n" ' alsa ALSA sound support'
printf "%s\n" ' attr attr/xattr support'
printf "%s\n" ' auth-pam PAM access control'
printf "%s\n" ' avx2 AVX2 optimizations'
printf "%s\n" ' avx512bw AVX512BW optimizations'
printf "%s\n" ' blkio libblkio block device driver'
printf "%s\n" ' bochs bochs image format support'
printf "%s\n" ' bpf eBPF support'
@ -244,10 +242,6 @@ _meson_option_parse() {
--audio-drv-list=*) quote_sh "-Daudio_drv_list=$2" ;;
--enable-auth-pam) printf "%s" -Dauth_pam=enabled ;;
--disable-auth-pam) printf "%s" -Dauth_pam=disabled ;;
--enable-avx2) printf "%s" -Davx2=enabled ;;
--disable-avx2) printf "%s" -Davx2=disabled ;;
--enable-avx512bw) printf "%s" -Davx512bw=enabled ;;
--disable-avx512bw) printf "%s" -Davx512bw=disabled ;;
--enable-gcov) printf "%s" -Db_coverage=true ;;
--disable-gcov) printf "%s" -Db_coverage=false ;;
--enable-lto) printf "%s" -Db_lto=true ;;

View file

@ -7,15 +7,6 @@ import json
import shlex
import subprocess
def find_command(src, target, compile_commands):
for command in compile_commands:
if command['file'] != src:
continue
if target != '' and command['command'].find(target) == -1:
continue
return command['command']
return 'false'
def process_command(src, command):
skip = False
out = []
@ -43,14 +34,22 @@ def main(args):
print("MODINFO_DEBUG target %s" % target)
arch = target[:-8] # cut '-softmmu'
print("MODINFO_START arch \"%s\" MODINFO_END" % arch)
with open('compile_commands.json') as f:
compile_commands = json.load(f)
for src in args:
compile_commands_json = json.load(f)
compile_commands = { x['output']: x for x in compile_commands_json }
for obj in args:
entry = compile_commands.get(obj, None)
if not entry:
sys.stderr.print('modinfo: Could not find object file', obj)
sys.exit(1)
src = entry['file']
if not src.endswith('.c'):
print("MODINFO_DEBUG skip %s" % src)
continue
command = entry['command']
print("MODINFO_DEBUG src %s" % src)
command = find_command(src, target, compile_commands)
cmdline = process_command(src, command)
print("MODINFO_DEBUG cmd", cmdline)
result = subprocess.run(cmdline, stdout = subprocess.PIPE,

View file

@ -922,6 +922,17 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_8000_0008_EBX (CPUID_8000_0008_EBX_XSAVEERPTR | \
CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_KERNEL_FEATURES)
#if defined CONFIG_USER_ONLY
#define CPUID_8000_0021_EAX_KERNEL_FEATURES CPUID_8000_0021_EAX_AUTO_IBRS
#else
#define CPUID_8000_0021_EAX_KERNEL_FEATURES 0
#endif
#define TCG_8000_0021_EAX_FEATURES ( \
CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | \
CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | \
CPUID_8000_0021_EAX_KERNEL_FEATURES)
FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_1_EDX] = {
.type = CPUID_FEATURE_WORD,
@ -1249,7 +1260,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"ibpb-brtype", "srso-no", "srso-user-kernel-no", NULL,
},
.cpuid = { .eax = 0x80000021, .reg = R_EAX, },
.tcg_features = 0,
.tcg_features = TCG_8000_0021_EAX_FEATURES,
.unmigratable_flags = 0,
},
[FEAT_8000_0021_EBX] = {
@ -1372,6 +1383,14 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"bhi-no", NULL, NULL, NULL,
"pbrsb-no", NULL, "gds-no", "rfds-no",
"rfds-clear", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, "its-no", NULL,
},
.msr = {
.index = MSR_IA32_ARCH_CAPABILITIES,
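
Counting the names above four per line, "its-no" lands in bit 62 of the MSR_IA32_ARCH_CAPABILITIES feature word. A small sketch of how a guest kernel could test the bit, assuming that position and the usual MSR index 0x10a; it is illustrative only and not taken from this series:

#include <stdint.h>

#define MSR_IA32_ARCH_CAPABILITIES 0x10a
#define ARCH_CAP_ITS_NO            (1ULL << 62)   /* assumed: "its-no" above */

static inline uint64_t rdmsr64(uint32_t msr)
{
    uint32_t lo, hi;

    __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
    return ((uint64_t)hi << 32) | lo;
}

/* Ring-0 only; CPUID.(EAX=7,ECX=0):EDX[29] must report ARCH_CAPABILITIES
 * support before the MSR may be read. */
static int cpu_not_affected_by_its(void)
{
    return (rdmsr64(MSR_IA32_ARCH_CAPABILITIES) & ARCH_CAP_ITS_NO) != 0;
}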

View file

@ -1805,11 +1805,6 @@ typedef struct CPUCaches {
CPUCacheInfo *l3_cache;
} CPUCaches;
typedef struct X86LazyFlags {
target_ulong result;
target_ulong auxbits;
} X86LazyFlags;
typedef struct CPUArchState {
/* standard registers */
target_ulong regs[CPU_NB_REGS];
@ -2102,7 +2097,6 @@ typedef struct CPUArchState {
QemuMutex xen_timers_lock;
#endif
#if defined(CONFIG_HVF)
X86LazyFlags lflags;
void *emu_mmio_buf;
#endif

View file

@ -109,8 +109,8 @@ static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode,
{
op->type = X86_VAR_REG;
op->reg = decode->modrm.reg;
op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r,
decode->operand_size);
op->regptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r,
decode->operand_size);
}
static void decode_rax(CPUX86State *env, struct x86_decode *decode,
@ -119,8 +119,8 @@ static void decode_rax(CPUX86State *env, struct x86_decode *decode,
op->type = X86_VAR_REG;
op->reg = R_EAX;
/* Since reg is always AX, REX prefix has no impact. */
op->ptr = get_reg_ref(env, op->reg, false, 0,
decode->operand_size);
op->regptr = get_reg_ref(env, op->reg, false, 0,
decode->operand_size);
}
static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode,
@ -262,16 +262,16 @@ static void decode_incgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x40;
decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
}
static void decode_decgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x48;
decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
}
static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode)
@ -287,16 +287,16 @@ static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x50;
decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
}
static void decode_popgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x58;
decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
}
static void decode_jxx(CPUX86State *env, struct x86_decode *decode)
@ -377,16 +377,16 @@ static void decode_xchgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x90;
decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
}
static void decode_movgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0xb8;
decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode_immediate(env, decode, &decode->op[1], decode->operand_size);
}
@ -394,15 +394,15 @@ static void fetch_moffs(CPUX86State *env, struct x86_decode *decode,
struct x86_decode_op *op)
{
op->type = X86_VAR_OFFSET;
op->ptr = decode_bytes(env, decode, decode->addressing_size);
op->addr = decode_bytes(env, decode, decode->addressing_size);
}
static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0xb0;
decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode_immediate(env, decode, &decode->op[1], decode->operand_size);
}
@ -411,8 +411,8 @@ static void decode_rcx(CPUX86State *env, struct x86_decode *decode,
{
op->type = X86_VAR_REG;
op->reg = R_ECX;
op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b,
decode->operand_size);
op->regptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b,
decode->operand_size);
}
struct decode_tbl {
@ -631,8 +631,8 @@ static void decode_bswap(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[1] - 0xc8;
decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
decode->rex.b, decode->operand_size);
}
static void decode_d9_4(CPUX86State *env, struct x86_decode *decode)
@ -1656,16 +1656,16 @@ void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
}
calc_addr:
if (X86_DECODE_CMD_LEA == decode->cmd) {
op->ptr = (uint16_t)ptr;
op->addr = (uint16_t)ptr;
} else {
op->ptr = decode_linear_addr(env, decode, (uint16_t)ptr, seg);
op->addr = decode_linear_addr(env, decode, (uint16_t)ptr, seg);
}
}
target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
void *get_reg_ref(CPUX86State *env, int reg, int rex_present,
int is_extended, int size)
{
target_ulong ptr = 0;
void *ptr = NULL;
if (is_extended) {
reg |= R_R8;
@ -1674,13 +1674,13 @@ target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
switch (size) {
case 1:
if (is_extended || reg < 4 || rex_present) {
ptr = (target_ulong)&RL(env, reg);
ptr = &RL(env, reg);
} else {
ptr = (target_ulong)&RH(env, reg - 4);
ptr = &RH(env, reg - 4);
}
break;
default:
ptr = (target_ulong)&RRX(env, reg);
ptr = &RRX(env, reg);
break;
}
return ptr;
@ -1691,7 +1691,7 @@ target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
{
target_ulong val = 0;
memcpy(&val,
(void *)get_reg_ref(env, reg, rex_present, is_extended, size),
get_reg_ref(env, reg, rex_present, is_extended, size),
size);
return val;
}
@ -1758,9 +1758,9 @@ void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
}
if (X86_DECODE_CMD_LEA == decode->cmd) {
op->ptr = (uint32_t)ptr;
op->addr = (uint32_t)ptr;
} else {
op->ptr = decode_linear_addr(env, decode, (uint32_t)ptr, seg);
op->addr = decode_linear_addr(env, decode, (uint32_t)ptr, seg);
}
}
@ -1788,9 +1788,9 @@ void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
}
if (X86_DECODE_CMD_LEA == decode->cmd) {
op->ptr = ptr;
op->addr = ptr;
} else {
op->ptr = decode_linear_addr(env, decode, ptr, seg);
op->addr = decode_linear_addr(env, decode, ptr, seg);
}
}
@ -1801,8 +1801,8 @@ void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
if (3 == decode->modrm.mod) {
op->reg = decode->modrm.reg;
op->type = X86_VAR_REG;
op->ptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex,
decode->rex.b, decode->operand_size);
op->regptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex,
decode->rex.b, decode->operand_size);
return;
}

View file

@ -266,7 +266,10 @@ typedef struct x86_decode_op {
int reg;
target_ulong val;
target_ulong ptr;
union {
target_ulong addr;
void *regptr;
};
} x86_decode_op;
typedef struct x86_decode {
@ -301,8 +304,8 @@ uint64_t sign(uint64_t val, int size);
uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
int is_extended, int size);
void *get_reg_ref(CPUX86State *env, int reg, int rex_present,
int is_extended, int size);
target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
int is_extended, int size);
void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,

View file

@ -52,7 +52,7 @@
uint8_t v2 = (uint8_t)decode->op[1].val; \
uint8_t diff = v1 cmd v2; \
if (save_res) { \
write_val_ext(env, decode->op[0].ptr, diff, 1); \
write_val_ext(env, &decode->op[0], diff, 1); \
} \
FLAGS_FUNC##8(env, v1, v2, diff); \
break; \
@ -63,7 +63,7 @@
uint16_t v2 = (uint16_t)decode->op[1].val; \
uint16_t diff = v1 cmd v2; \
if (save_res) { \
write_val_ext(env, decode->op[0].ptr, diff, 2); \
write_val_ext(env, &decode->op[0], diff, 2); \
} \
FLAGS_FUNC##16(env, v1, v2, diff); \
break; \
@ -74,7 +74,7 @@
uint32_t v2 = (uint32_t)decode->op[1].val; \
uint32_t diff = v1 cmd v2; \
if (save_res) { \
write_val_ext(env, decode->op[0].ptr, diff, 4); \
write_val_ext(env, &decode->op[0], diff, 4); \
} \
FLAGS_FUNC##32(env, v1, v2, diff); \
break; \
@ -121,7 +121,7 @@ void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
}
}
target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
target_ulong read_val_from_reg(void *reg_ptr, int size)
{
target_ulong val;
@ -144,7 +144,7 @@ target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
return val;
}
void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
void write_val_to_reg(void *reg_ptr, target_ulong val, int size)
{
switch (size) {
case 1:
@ -164,18 +164,18 @@ void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
}
}
static bool is_host_reg(CPUX86State *env, target_ulong ptr)
static void write_val_to_mem(CPUX86State *env, target_ulong ptr, target_ulong val, int size)
{
return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs);
emul_ops->write_mem(env_cpu(env), &val, ptr, size);
}
void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size)
void write_val_ext(CPUX86State *env, struct x86_decode_op *decode, target_ulong val, int size)
{
if (is_host_reg(env, ptr)) {
write_val_to_reg(ptr, val, size);
return;
if (decode->type == X86_VAR_REG) {
write_val_to_reg(decode->regptr, val, size);
} else {
write_val_to_mem(env, decode->addr, val, size);
}
emul_ops->write_mem(env_cpu(env), &val, ptr, size);
}
uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes)
@ -185,15 +185,11 @@ uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes)
}
target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size)
static target_ulong read_val_from_mem(CPUX86State *env, target_long ptr, int size)
{
target_ulong val;
uint8_t *mmio_ptr;
if (is_host_reg(env, ptr)) {
return read_val_from_reg(ptr, size);
}
mmio_ptr = read_mmio(env, ptr, size);
switch (size) {
case 1:
@ -215,6 +211,15 @@ target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size)
return val;
}
target_ulong read_val_ext(CPUX86State *env, struct x86_decode_op *decode, int size)
{
if (decode->type == X86_VAR_REG) {
return read_val_from_reg(decode->regptr, size);
} else {
return read_val_from_mem(env, decode->addr, size);
}
}
static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
int n, bool val_op0, bool val_op1, bool val_op2)
{
@ -226,25 +231,25 @@ static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
case X86_VAR_IMMEDIATE:
break;
case X86_VAR_REG:
VM_PANIC_ON(!decode->op[i].ptr);
VM_PANIC_ON(!decode->op[i].regptr);
if (calc_val[i]) {
decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
decode->op[i].val = read_val_from_reg(decode->op[i].regptr,
decode->operand_size);
}
break;
case X86_VAR_RM:
calc_modrm_operand(env, decode, &decode->op[i]);
if (calc_val[i]) {
decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
decode->op[i].val = read_val_ext(env, &decode->op[i],
decode->operand_size);
}
break;
case X86_VAR_OFFSET:
decode->op[i].ptr = decode_linear_addr(env, decode,
decode->op[i].ptr,
R_DS);
decode->op[i].addr = decode_linear_addr(env, decode,
decode->op[i].addr,
R_DS);
if (calc_val[i]) {
decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
decode->op[i].val = read_val_ext(env, &decode->op[i],
decode->operand_size);
}
break;
@ -257,7 +262,7 @@ static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
static void exec_mov(CPUX86State *env, struct x86_decode *decode)
{
fetch_operands(env, decode, 2, false, true, false);
write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
write_val_ext(env, &decode->op[0], decode->op[1].val,
decode->operand_size);
env->eip += decode->len;
@ -312,7 +317,7 @@ static void exec_neg(CPUX86State *env, struct x86_decode *decode)
fetch_operands(env, decode, 2, true, true, false);
val = 0 - sign(decode->op[1].val, decode->operand_size);
write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);
write_val_ext(env, &decode->op[1], val, decode->operand_size);
if (4 == decode->operand_size) {
SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
@ -363,7 +368,7 @@ static void exec_not(CPUX86State *env, struct x86_decode *decode)
{
fetch_operands(env, decode, 1, true, false, false);
write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
write_val_ext(env, &decode->op[0], ~decode->op[0].val,
decode->operand_size);
env->eip += decode->len;
}
@ -382,8 +387,8 @@ void exec_movzx(CPUX86State *env, struct x86_decode *decode)
}
decode->operand_size = src_op_size;
calc_modrm_operand(env, decode, &decode->op[1]);
decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
decode->op[1].val = read_val_ext(env, &decode->op[1], src_op_size);
write_val_ext(env, &decode->op[0], decode->op[1].val, op_size);
env->eip += decode->len;
}
@ -469,10 +474,10 @@ static inline void string_rep(CPUX86State *env, struct x86_decode *decode,
while (rcx--) {
func(env, decode);
write_reg(env, R_ECX, rcx, decode->addressing_size);
if ((PREFIX_REP == rep) && !get_ZF(env)) {
if ((PREFIX_REP == rep) && !env->cc_dst) {
break;
}
if ((PREFIX_REPN == rep) && get_ZF(env)) {
if ((PREFIX_REPN == rep) && env->cc_dst) {
break;
}
}
@ -535,8 +540,8 @@ static void exec_movs_single(CPUX86State *env, struct x86_decode *decode)
dst_addr = linear_addr_size(env_cpu(env), RDI(env),
decode->addressing_size, R_ES);
val = read_val_ext(env, src_addr, decode->operand_size);
write_val_ext(env, dst_addr, val, decode->operand_size);
val = read_val_from_mem(env, src_addr, decode->operand_size);
write_val_to_mem(env, dst_addr, val, decode->operand_size);
string_increment_reg(env, R_ESI, decode);
string_increment_reg(env, R_EDI, decode);
@ -563,9 +568,9 @@ static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode)
decode->addressing_size, R_ES);
decode->op[0].type = X86_VAR_IMMEDIATE;
decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
decode->op[0].val = read_val_from_mem(env, src_addr, decode->operand_size);
decode->op[1].type = X86_VAR_IMMEDIATE;
decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);
decode->op[1].val = read_val_from_mem(env, dst_addr, decode->operand_size);
EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
@ -697,15 +702,15 @@ static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag)
if (decode->op[0].type != X86_VAR_REG) {
if (4 == decode->operand_size) {
displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
decode->op[0].ptr += 4 * displacement;
decode->op[0].addr += 4 * displacement;
} else if (2 == decode->operand_size) {
displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
decode->op[0].ptr += 2 * displacement;
decode->op[0].addr += 2 * displacement;
} else {
VM_PANIC("bt 64bit\n");
}
}
decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
decode->op[0].val = read_val_ext(env, &decode->op[0],
decode->operand_size);
cf = (decode->op[0].val >> index) & 0x01;
@ -723,7 +728,7 @@ static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag)
decode->op[0].val &= ~(1u << index);
break;
}
write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
write_val_ext(env, &decode->op[0], decode->op[0].val,
decode->operand_size);
set_CF(env, cf);
}
@ -775,7 +780,7 @@ void exec_shl(CPUX86State *env, struct x86_decode *decode)
of = cf ^ (res >> 7);
}
write_val_ext(env, decode->op[0].ptr, res, 1);
write_val_ext(env, &decode->op[0], res, 1);
SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
SET_FLAGS_OxxxxC(env, of, cf);
break;
@ -791,7 +796,7 @@ void exec_shl(CPUX86State *env, struct x86_decode *decode)
of = cf ^ (res >> 15); /* of = cf ^ result15 */
}
write_val_ext(env, decode->op[0].ptr, res, 2);
write_val_ext(env, &decode->op[0], res, 2);
SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
SET_FLAGS_OxxxxC(env, of, cf);
break;
@ -800,7 +805,7 @@ void exec_shl(CPUX86State *env, struct x86_decode *decode)
{
uint32_t res = decode->op[0].val << count;
write_val_ext(env, decode->op[0].ptr, res, 4);
write_val_ext(env, &decode->op[0], res, 4);
SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
cf = (decode->op[0].val >> (32 - count)) & 0x1;
of = cf ^ (res >> 31); /* of = cf ^ result31 */
@ -831,10 +836,10 @@ void exec_movsx(CPUX86State *env, struct x86_decode *decode)
decode->operand_size = src_op_size;
calc_modrm_operand(env, decode, &decode->op[1]);
decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
decode->op[1].val = sign(read_val_ext(env, &decode->op[1], src_op_size),
src_op_size);
write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
write_val_ext(env, &decode->op[0], decode->op[1].val, op_size);
env->eip += decode->len;
}
@ -862,7 +867,7 @@ void exec_ror(CPUX86State *env, struct x86_decode *decode)
count &= 0x7; /* use only bottom 3 bits */
res = ((uint8_t)decode->op[0].val >> count) |
((uint8_t)decode->op[0].val << (8 - count));
write_val_ext(env, decode->op[0].ptr, res, 1);
write_val_ext(env, &decode->op[0], res, 1);
bit6 = (res >> 6) & 1;
bit7 = (res >> 7) & 1;
/* set eflags: ROR count affects the following flags: C, O */
@ -886,7 +891,7 @@ void exec_ror(CPUX86State *env, struct x86_decode *decode)
count &= 0x0f; /* use only 4 LSB's */
res = ((uint16_t)decode->op[0].val >> count) |
((uint16_t)decode->op[0].val << (16 - count));
write_val_ext(env, decode->op[0].ptr, res, 2);
write_val_ext(env, &decode->op[0], res, 2);
bit14 = (res >> 14) & 1;
bit15 = (res >> 15) & 1;
@ -904,7 +909,7 @@ void exec_ror(CPUX86State *env, struct x86_decode *decode)
if (count) {
res = ((uint32_t)decode->op[0].val >> count) |
((uint32_t)decode->op[0].val << (32 - count));
write_val_ext(env, decode->op[0].ptr, res, 4);
write_val_ext(env, &decode->op[0], res, 4);
bit31 = (res >> 31) & 1;
bit30 = (res >> 30) & 1;
@ -941,7 +946,7 @@ void exec_rol(CPUX86State *env, struct x86_decode *decode)
res = ((uint8_t)decode->op[0].val << count) |
((uint8_t)decode->op[0].val >> (8 - count));
write_val_ext(env, decode->op[0].ptr, res, 1);
write_val_ext(env, &decode->op[0], res, 1);
/* set eflags:
* ROL count affects the following flags: C, O
*/
@ -968,7 +973,7 @@ void exec_rol(CPUX86State *env, struct x86_decode *decode)
res = ((uint16_t)decode->op[0].val << count) |
((uint16_t)decode->op[0].val >> (16 - count));
write_val_ext(env, decode->op[0].ptr, res, 2);
write_val_ext(env, &decode->op[0], res, 2);
bit0 = (res & 0x1);
bit15 = (res >> 15);
/* of = cf ^ result15 */
@ -986,7 +991,7 @@ void exec_rol(CPUX86State *env, struct x86_decode *decode)
res = ((uint32_t)decode->op[0].val << count) |
((uint32_t)decode->op[0].val >> (32 - count));
write_val_ext(env, decode->op[0].ptr, res, 4);
write_val_ext(env, &decode->op[0], res, 4);
bit0 = (res & 0x1);
bit31 = (res >> 31);
/* of = cf ^ result31 */
@ -1024,7 +1029,7 @@ void exec_rcl(CPUX86State *env, struct x86_decode *decode)
(op1_8 >> (9 - count));
}
write_val_ext(env, decode->op[0].ptr, res, 1);
write_val_ext(env, &decode->op[0], res, 1);
cf = (op1_8 >> (8 - count)) & 0x01;
of = cf ^ (res >> 7); /* of = cf ^ result7 */
@ -1050,7 +1055,7 @@ void exec_rcl(CPUX86State *env, struct x86_decode *decode)
(op1_16 >> (17 - count));
}
write_val_ext(env, decode->op[0].ptr, res, 2);
write_val_ext(env, &decode->op[0], res, 2);
cf = (op1_16 >> (16 - count)) & 0x1;
of = cf ^ (res >> 15); /* of = cf ^ result15 */
@ -1073,7 +1078,7 @@ void exec_rcl(CPUX86State *env, struct x86_decode *decode)
(op1_32 >> (33 - count));
}
write_val_ext(env, decode->op[0].ptr, res, 4);
write_val_ext(env, &decode->op[0], res, 4);
cf = (op1_32 >> (32 - count)) & 0x1;
of = cf ^ (res >> 31); /* of = cf ^ result31 */
@ -1105,7 +1110,7 @@ void exec_rcr(CPUX86State *env, struct x86_decode *decode)
res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
(op1_8 << (9 - count));
write_val_ext(env, decode->op[0].ptr, res, 1);
write_val_ext(env, &decode->op[0], res, 1);
cf = (op1_8 >> (count - 1)) & 0x1;
of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
@ -1124,7 +1129,7 @@ void exec_rcr(CPUX86State *env, struct x86_decode *decode)
res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
(op1_16 << (17 - count));
write_val_ext(env, decode->op[0].ptr, res, 2);
write_val_ext(env, &decode->op[0], res, 2);
cf = (op1_16 >> (count - 1)) & 0x1;
of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
@ -1148,7 +1153,7 @@ void exec_rcr(CPUX86State *env, struct x86_decode *decode)
(op1_32 << (33 - count));
}
write_val_ext(env, decode->op[0].ptr, res, 4);
write_val_ext(env, &decode->op[0], res, 4);
cf = (op1_32 >> (count - 1)) & 0x1;
of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
@ -1163,9 +1168,9 @@ static void exec_xchg(CPUX86State *env, struct x86_decode *decode)
{
fetch_operands(env, decode, 2, true, true, false);
write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
write_val_ext(env, &decode->op[0], decode->op[1].val,
decode->operand_size);
write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
write_val_ext(env, &decode->op[1], decode->op[0].val,
decode->operand_size);
env->eip += decode->len;
@ -1174,7 +1179,7 @@ static void exec_xchg(CPUX86State *env, struct x86_decode *decode)
static void exec_xadd(CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
write_val_ext(env, &decode->op[1], decode->op[0].val,
decode->operand_size);
env->eip += decode->len;

View file

@ -42,11 +42,11 @@ void x86_emul_raise_exception(CPUX86State *env, int exception_index, int error_c
target_ulong read_reg(CPUX86State *env, int reg, int size);
void write_reg(CPUX86State *env, int reg, target_ulong val, int size);
target_ulong read_val_from_reg(target_ulong reg_ptr, int size);
void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size);
void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size);
target_ulong read_val_from_reg(void *reg_ptr, int size);
void write_val_to_reg(void *reg_ptr, target_ulong val, int size);
void write_val_ext(CPUX86State *env, struct x86_decode_op *decode, target_ulong val, int size);
uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes);
target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size);
target_ulong read_val_ext(CPUX86State *env, struct x86_decode_op *decode, int size);
void exec_movzx(CPUX86State *env, struct x86_decode *decode);
void exec_shl(CPUX86State *env, struct x86_decode *decode);

View file

@ -29,41 +29,50 @@
#include "x86.h"
/* this is basically bocsh code */
/*
* The algorithms here are similar to those in Bochs. After an ALU
* operation, CC_DST can be used to compute ZF, SF and PF, whereas
* CC_SRC is used to compute AF, CF and OF. In reality, SF and PF are the
* XOR of the value computed from CC_DST and the value found in bits 7 and 2
* of CC_SRC; this way the same logic can be used to compute the flags
* both before and after an ALU operation.
*
* Compared to the TCG CC_OP codes, this avoids conditionals when converting
* to and from the RFLAGS representation.
*/
#define LF_SIGN_BIT 31
#define LF_SIGN_BIT (TARGET_LONG_BITS - 1)
#define LF_BIT_SD (0) /* lazy Sign Flag Delta */
#define LF_BIT_AF (3) /* lazy Adjust flag */
#define LF_BIT_PDB (8) /* lazy Parity Delta Byte (8 bits) */
#define LF_BIT_CF (31) /* lazy Carry Flag */
#define LF_BIT_PO (30) /* lazy Partial Overflow = CF ^ OF */
#define LF_BIT_PD (2) /* lazy Parity Delta, same bit as PF */
#define LF_BIT_AF (3) /* lazy Adjust flag */
#define LF_BIT_SD (7) /* lazy Sign Flag Delta, same bit as SF */
#define LF_BIT_CF (TARGET_LONG_BITS - 1) /* lazy Carry Flag */
#define LF_BIT_PO (TARGET_LONG_BITS - 2) /* lazy Partial Overflow = CF ^ OF */
#define LF_MASK_SD (0x01 << LF_BIT_SD)
#define LF_MASK_AF (0x01 << LF_BIT_AF)
#define LF_MASK_PDB (0xFF << LF_BIT_PDB)
#define LF_MASK_CF (0x01 << LF_BIT_CF)
#define LF_MASK_PO (0x01 << LF_BIT_PO)
#define LF_MASK_PD ((target_ulong)0x01 << LF_BIT_PD)
#define LF_MASK_AF ((target_ulong)0x01 << LF_BIT_AF)
#define LF_MASK_SD ((target_ulong)0x01 << LF_BIT_SD)
#define LF_MASK_CF ((target_ulong)0x01 << LF_BIT_CF)
#define LF_MASK_PO ((target_ulong)0x01 << LF_BIT_PO)
/* ******************* */
/* OSZAPC */
/* ******************* */
/* size, carries, result */
/* use carries to fill in AF, PO and CF, while ensuring PD and SD are clear.
* for full-word operations just clear PD and SD; for smaller operand
* sizes only keep AF in the low byte and shift the carries left to
* place PO and CF in the top two bits.
*/
#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \
target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
(((lf_carries) >> (size - 2)) << LF_BIT_PO); \
env->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
if ((size) == 32) { \
temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
} else if ((size) == 16) { \
temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
} else if ((size) == 8) { \
temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
env->cc_dst = (target_ulong)(int##size##_t)(lf_result); \
target_ulong temp = (lf_carries); \
if ((size) == TARGET_LONG_BITS) { \
temp = temp & ~(LF_MASK_PD | LF_MASK_SD); \
} else { \
VM_PANIC("unimplemented"); \
temp = (temp & LF_MASK_AF) | (temp << (TARGET_LONG_BITS - (size))); \
} \
env->lflags.auxbits = (target_ulong)(uint32_t)temp; \
env->cc_src = temp; \
}
/* carries, result */
@ -77,23 +86,18 @@
/* ******************* */
/* OSZAP */
/* ******************* */
/* size, carries, result */
/* same as setting OSZAPC, but preserve CF and flip PO if the old value of CF
* did not match the high bit of lf_carries. */
#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \
target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
(((lf_carries) >> (size - 2)) << LF_BIT_PO); \
if ((size) == 32) { \
temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
} else if ((size) == 16) { \
temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
} else if ((size) == 8) { \
temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
env->cc_dst = (target_ulong)(int##size##_t)(lf_result); \
target_ulong temp = (lf_carries); \
if ((size) == TARGET_LONG_BITS) { \
temp = (temp & ~(LF_MASK_PD | LF_MASK_SD)); \
} else { \
VM_PANIC("unimplemented"); \
temp = (temp & LF_MASK_AF) | (temp << (TARGET_LONG_BITS - (size))); \
} \
env->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
target_ulong delta_c = (env->lflags.auxbits ^ temp) & LF_MASK_CF; \
delta_c ^= (delta_c >> 1); \
env->lflags.auxbits = (target_ulong)(uint32_t)(temp ^ delta_c); \
target_ulong cf_changed = ((target_long)(env->cc_src ^ temp)) < 0; \
env->cc_src = temp ^ (cf_changed * (LF_MASK_PO | LF_MASK_CF)); \
}
/* carries, result */
@ -104,11 +108,11 @@
#define SET_FLAGS_OSZAP_32(carries, result) \
SET_FLAGS_OSZAP_SIZE(32, carries, result)
void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf)
void SET_FLAGS_OxxxxC(CPUX86State *env, bool new_of, bool new_cf)
{
uint32_t temp_po = new_of ^ new_cf;
env->lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);
env->lflags.auxbits |= (temp_po << LF_BIT_PO) | (new_cf << LF_BIT_CF);
env->cc_src &= ~(LF_MASK_PO | LF_MASK_CF);
env->cc_src |= (-(target_ulong)new_cf << LF_BIT_PO);
env->cc_src ^= ((target_ulong)new_of << LF_BIT_PO);
}
void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
@ -202,104 +206,68 @@ void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
SET_FLAGS_OSZAPC_8(0, diff);
}
bool get_PF(CPUX86State *env)
static inline uint32_t get_PF(CPUX86State *env)
{
uint32_t temp = (255 & env->lflags.result);
temp = temp ^ (255 & (env->lflags.auxbits >> LF_BIT_PDB));
temp = (temp ^ (temp >> 4)) & 0x0F;
return (0x9669U >> temp) & 1;
return ((parity8(env->cc_dst) - 1) ^ env->cc_src) & CC_P;
}
void set_PF(CPUX86State *env, bool val)
static inline uint32_t get_OF(CPUX86State *env)
{
uint32_t temp = (255 & env->lflags.result) ^ (!val);
env->lflags.auxbits &= ~(LF_MASK_PDB);
env->lflags.auxbits |= (temp << LF_BIT_PDB);
}
bool get_OF(CPUX86State *env)
{
return ((env->lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;
return ((env->cc_src >> (LF_BIT_CF - 11)) + CC_O / 2) & CC_O;
}
bool get_CF(CPUX86State *env)
{
return (env->lflags.auxbits >> LF_BIT_CF) & 1;
}
void set_OF(CPUX86State *env, bool val)
{
bool old_cf = get_CF(env);
SET_FLAGS_OxxxxC(env, val, old_cf);
return ((target_long)env->cc_src) < 0;
}
void set_CF(CPUX86State *env, bool val)
{
bool old_of = get_OF(env);
SET_FLAGS_OxxxxC(env, old_of, val);
/* If CF changes, flip PO and CF */
target_ulong temp = -(target_ulong)val;
target_ulong cf_changed = ((target_long)(env->cc_src ^ temp)) < 0;
env->cc_src ^= cf_changed * (LF_MASK_PO | LF_MASK_CF);
}
bool get_AF(CPUX86State *env)
static inline uint32_t get_ZF(CPUX86State *env)
{
return (env->lflags.auxbits >> LF_BIT_AF) & 1;
return env->cc_dst ? 0 : CC_Z;
}
void set_AF(CPUX86State *env, bool val)
static inline uint32_t get_SF(CPUX86State *env)
{
env->lflags.auxbits &= ~(LF_MASK_AF);
env->lflags.auxbits |= val << LF_BIT_AF;
}
bool get_ZF(CPUX86State *env)
{
return !env->lflags.result;
}
void set_ZF(CPUX86State *env, bool val)
{
if (val) {
env->lflags.auxbits ^=
(((env->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);
/* merge the parity bits into the Parity Delta Byte */
uint32_t temp_pdb = (255 & env->lflags.result);
env->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);
/* now zero the .result value */
env->lflags.result = 0;
} else {
env->lflags.result |= (1 << 8);
}
}
bool get_SF(CPUX86State *env)
{
return ((env->lflags.result >> LF_SIGN_BIT) ^
(env->lflags.auxbits >> LF_BIT_SD)) & 1;
}
void set_SF(CPUX86State *env, bool val)
{
bool temp_sf = get_SF(env);
env->lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;
return ((env->cc_dst >> (LF_SIGN_BIT - LF_BIT_SD)) ^
env->cc_src) & CC_S;
}
void lflags_to_rflags(CPUX86State *env)
{
env->eflags &= ~(CC_C|CC_P|CC_A|CC_Z|CC_S|CC_O);
env->eflags |= get_CF(env) ? CC_C : 0;
env->eflags |= get_PF(env) ? CC_P : 0;
env->eflags |= get_AF(env) ? CC_A : 0;
env->eflags |= get_ZF(env) ? CC_Z : 0;
env->eflags |= get_SF(env) ? CC_S : 0;
env->eflags |= get_OF(env) ? CC_O : 0;
/* rotate left by one to move carry-out bits into CF and AF */
env->eflags |= (
(env->cc_src << 1) |
(env->cc_src >> (TARGET_LONG_BITS - 1))) & (CC_C | CC_A);
env->eflags |= get_SF(env);
env->eflags |= get_PF(env);
env->eflags |= get_ZF(env);
env->eflags |= get_OF(env);
}
void rflags_to_lflags(CPUX86State *env)
{
env->lflags.auxbits = env->lflags.result = 0;
set_OF(env, env->eflags & CC_O);
set_SF(env, env->eflags & CC_S);
set_ZF(env, env->eflags & CC_Z);
set_AF(env, env->eflags & CC_A);
set_PF(env, env->eflags & CC_P);
set_CF(env, env->eflags & CC_C);
target_ulong cf_xor_of;
env->cc_src = CC_P;
env->cc_src ^= env->eflags & (CC_S | CC_P);
/* rotate right by one to move CF and AF into the carry-out positions */
env->cc_src |= (
(env->eflags >> 1) |
(env->eflags << (TARGET_LONG_BITS - 1))) & (CC_C | CC_A);
cf_xor_of = (env->eflags & (CC_C | CC_O)) + (CC_O - CC_C);
env->cc_src |= -cf_xor_of & LF_MASK_PO;
/* Leave the low byte zero so that parity is not affected. */
env->cc_dst = !(env->eflags & CC_Z) << 8;
}
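
A worked example of the encoding used throughout this file may help. For an 8-bit operation, SET_FLAGS_OSZAPC_SIZE keeps the AF carry in place and shifts the rest of the carry vector into the top bits of cc_src. The sketch below mirrors that, assuming the conventional Bochs carry-out vector for addition (the ADD-specific macros are not shown in this hunk, so that formula is an assumption):

/* Sketch only, not QEMU code: encode the flags of an 8-bit add on a
 * 64-bit target the way the macros above do. */
#include <stdint.h>

static void sketch_set_flags_add8(uint64_t *cc_dst, uint64_t *cc_src,
                                  uint8_t v1, uint8_t v2)
{
    uint8_t res = v1 + v2;
    /* Assumed Bochs-style carry-out vector for addition. */
    uint64_t carries = (v1 & v2) | ((v1 | v2) & (uint8_t)~res);

    *cc_dst = (uint64_t)(int64_t)(int8_t)res;            /* sign-extended result */
    *cc_src = (carries & 0x08) | (carries << (64 - 8));  /* AF stays at bit 3,
                                                            CF -> bit 63, PO -> bit 62 */
}

/*
 * Example: 0x7f + 0x01 = 0x80, carries = 0x7f.
 *   cc_src bit 63 (CF) = carry out of bit 7 = 0
 *   cc_src bit 62 (PO) = carry out of bit 6 = 1  -> OF = PO ^ CF = 1
 *   cc_src bit 3  (AF) = 1
 *   cc_dst = 0xffffffffffffff80 -> ZF = 0, SF = 1, PF = 0 (odd parity)
 * which matches the flag readers defined earlier in this file.
 */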

View file

@ -28,20 +28,10 @@
void lflags_to_rflags(CPUX86State *env);
void rflags_to_lflags(CPUX86State *env);
bool get_PF(CPUX86State *env);
void set_PF(CPUX86State *env, bool val);
bool get_CF(CPUX86State *env);
void set_CF(CPUX86State *env, bool val);
bool get_AF(CPUX86State *env);
void set_AF(CPUX86State *env, bool val);
bool get_ZF(CPUX86State *env);
void set_ZF(CPUX86State *env, bool val);
bool get_SF(CPUX86State *env);
void set_SF(CPUX86State *env, bool val);
bool get_OF(CPUX86State *env);
void set_OF(CPUX86State *env, bool val);
void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf);
void SET_FLAGS_OxxxxC(CPUX86State *env, bool new_of, bool new_cf);
void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff);

View file

@ -2542,7 +2542,13 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
s->has_modrm = false;
s->prefix = 0;
next_byte:
next_byte:;
#ifdef TARGET_X86_64
/* clear any REX prefix followed by other prefixes. */
int rex;
rex = -1;
next_byte_rex:
#endif
b = x86_ldub_code(env, s);
/* Collect prefixes. */
@ -2585,13 +2591,12 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
#ifdef TARGET_X86_64
case 0x40 ... 0x4f:
if (CODE64(s)) {
/* REX prefix */
s->prefix |= PREFIX_REX;
s->vex_w = (b >> 3) & 1;
s->rex_r = (b & 0x4) << 1;
s->rex_x = (b & 0x2) << 2;
s->rex_b = (b & 0x1) << 3;
goto next_byte;
/*
* REX prefix; ignored unless it is the last prefix, so
* for now just stash it
*/
rex = b;
goto next_byte_rex;
}
break;
#endif
@ -2618,10 +2623,13 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
/* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ
| PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) {
| PREFIX_LOCK | PREFIX_DATA)) {
goto illegal_op;
}
#ifdef TARGET_X86_64
if (rex != -1) {
goto illegal_op;
}
s->rex_r = (~vex2 >> 4) & 8;
#endif
if (b == 0xc5) {
@ -2661,6 +2669,16 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
/* Post-process prefixes. */
if (CODE64(s)) {
#ifdef TARGET_X86_64
if (rex != -1) {
s->prefix |= PREFIX_REX;
s->vex_w = (rex >> 3) & 1;
s->rex_r = (rex & 0x4) << 1;
s->rex_x = (rex & 0x2) << 2;
s->rex_b = (rex & 0x1) << 3;
}
#endif
/*
* In 64-bit mode, the default data size is 32-bit. Select 64-bit
* data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
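
The effect of stashing the REX byte in rex and only applying it during post-processing is the architected behaviour: a REX prefix that is not the last prefix before the opcode is ignored. A small illustration with assumed example byte sequences (not taken from the patch):

/*
 * Illustration only. In 64-bit mode:
 *
 *   66 48 89 c8   REX.W (0x48) is the last prefix, so it takes effect
 *                 and wins over 0x66:      mov rax, rcx
 *
 *   48 66 89 c8   REX.W is followed by another prefix; the 0x66 case
 *                 jumps back to next_byte, which resets rex to -1, so
 *                 the REX byte is ignored: mov ax, cx
 *
 * Before this change the REX bits were applied to the prefix state as
 * soon as the byte was seen, even when another prefix followed.
 */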

View file

@ -326,10 +326,10 @@ static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
uint32_t e1, uint32_t e2, int source,
uint32_t next_eip, uintptr_t retaddr)
static void switch_tss_ra(CPUX86State *env, int tss_selector,
uint32_t e1, uint32_t e2, int source,
uint32_t next_eip, bool has_error_code,
uint32_t error_code, uintptr_t retaddr)
{
int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
target_ulong tss_base;
@ -473,10 +473,6 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
new_segs[R_GS] = 0;
new_trap = 0;
}
/* XXX: avoid a compiler warning, see
http://support.amd.com/us/Processor_TechDocs/24593.pdf
chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
(void)new_trap;
/* clear busy bit (it is restartable) */
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
@ -599,14 +595,43 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
}
#endif
return type >> 3;
if (has_error_code) {
int cpl = env->hflags & HF_CPL_MASK;
StackAccess sa;
/* push the error code */
sa.env = env;
sa.ra = retaddr;
sa.mmu_index = x86_mmu_index_pl(env, cpl);
sa.sp = env->regs[R_ESP];
if (env->segs[R_SS].flags & DESC_B_MASK) {
sa.sp_mask = 0xffffffff;
} else {
sa.sp_mask = 0xffff;
}
sa.ss_base = env->segs[R_SS].base;
if (type & 8) {
pushl(&sa, error_code);
} else {
pushw(&sa, error_code);
}
SET_ESP(sa.sp, sa.sp_mask);
}
if (new_trap) {
env->dr[6] |= DR6_BT;
raise_exception_ra(env, EXCP01_DB, retaddr);
}
}
static int switch_tss(CPUX86State *env, int tss_selector,
uint32_t e1, uint32_t e2, int source,
uint32_t next_eip)
static void switch_tss(CPUX86State *env, int tss_selector,
uint32_t e1, uint32_t e2, int source,
uint32_t next_eip, bool has_error_code,
int error_code)
{
return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
switch_tss_ra(env, tss_selector, e1, e2, source, next_eip,
has_error_code, error_code, 0);
}
static inline unsigned int get_sp_mask(unsigned int e2)
@ -719,25 +744,8 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
if (!(e2 & DESC_P_MASK)) {
raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
}
shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
if (has_error_code) {
/* push the error code on the destination stack */
cpl = env->hflags & HF_CPL_MASK;
sa.mmu_index = x86_mmu_index_pl(env, cpl);
if (env->segs[R_SS].flags & DESC_B_MASK) {
sa.sp_mask = 0xffffffff;
} else {
sa.sp_mask = 0xffff;
}
sa.sp = env->regs[R_ESP];
sa.ss_base = env->segs[R_SS].base;
if (shift) {
pushl(&sa, error_code);
} else {
pushw(&sa, error_code);
}
SET_ESP(sa.sp, sa.sp_mask);
}
switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip,
has_error_code, error_code);
return;
}
@ -1533,7 +1541,8 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (dpl < cpl || dpl < rpl) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip,
false, 0, GETPC());
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@ -1745,7 +1754,8 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (dpl < cpl || dpl < rpl) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip,
false, 0, GETPC());
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@ -2256,7 +2266,8 @@ void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
if (type != 3) {
raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
}
switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip,
false, 0, GETPC());
} else {
helper_ret_protected(env, shift, 1, 0, GETPC());
}

View file

@ -1,4 +1,4 @@
if not get_option('tcg').allowed()
if not have_tcg
subdir_done()
endif

View file

@ -1,7 +1,4 @@
system_ss.add(pixman)
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: pixman) # for the include path
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: opengl) # for the include path
system_ss.add(png)
system_ss.add(files(
'clipboard.c',