Merge tag 'for-upstream-i386' of https://gitlab.com/bonzini/qemu into staging

* target/i386: new feature bits for AMD processors
* target/i386/tcg: improvements around flag handling
* target/i386: add AVX10 support
* target/i386: add GraniteRapids-v2 model
* dockerfiles: add libcbor
* New nitro-enclave machine type
* qom: cleanups to object_new
* configure: detect 64-bit MIPS for rust
* configure: deprecate 32-bit MIPS

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmcjvkQUHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroPIKgf/etNpO2T+eLFtWN/Qd5eopBXqNd9k
# KmeK9EgW9lqx2IPGNen33O+uKpb/TsMmubSsSF+YxTp7pmkc8+71f3rBMaIAD02r
# /paHSMVw0+f12DAFQz1jdvGihR7Mew0wcF/UdEt737y6vEmPxLTyYG3Gfa4NSZwT
# /V5jTOIcfUN/UEjNgIp6NTuOEESKmlqt22pfMapgkwMlAJYeeJU2X9eGYE86wJbq
# ZSXNgK3jL9wGT2XKa3e+OKzHfFpSkrB0JbQbdico9pefnBokN/hTeeUJ81wBAc7u
# i00W1CEQVJ5lhBc121d4AWMp83ME6HijJUOTMmJbFIONPsITFPHK1CAkng==
# =D4nR
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 31 Oct 2024 17:28:36 GMT
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* tag 'for-upstream-i386' of https://gitlab.com/bonzini/qemu: (49 commits)
  target/i386: Introduce GraniteRapids-v2 model
  target/i386: Add AVX512 state when AVX10 is supported
  target/i386: Add feature dependencies for AVX10
  target/i386: add CPUID.24 features for AVX10
  target/i386: add AVX10 feature and AVX10 version property
  target/i386: return bool from x86_cpu_filter_features
  target/i386: do not rely on ExtSaveArea for accelerator-supported XCR0 bits
  target/i386: cpu: set correct supported XCR0 features for TCG
  target/i386: use + to put flags together
  target/i386: use higher-precision arithmetic to compute CF
  target/i386: use compiler builtin to compute PF
  target/i386: make flag variables unsigned
  target/i386: add a note about gen_jcc1
  target/i386: add a few more trivial CCPrepare cases
  target/i386: optimize TEST+Jxx sequences
  target/i386: optimize computation of ZF from CC_OP_DYNAMIC
  target/i386: Wrap cc_op_live with a validity check
  target/i386: Introduce cc_op_size
  target/i386: Rearrange CCOp
  target/i386: remove CC_OP_CLR
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

commit c94bee4cd6
Peter Maydell <peter.maydell@linaro.org>, 2024-11-02 16:21:38 +00:00
84 changed files with 4219 additions and 350 deletions

@ -27,7 +27,7 @@
/***********************************************************/
/* x86 debug */
static const char *cc_op_str[CC_OP_NB] = {
static const char * const cc_op_str[] = {
[CC_OP_DYNAMIC] = "DYNAMIC",
[CC_OP_EFLAGS] = "EFLAGS",
@ -91,7 +91,6 @@ static const char *cc_op_str[CC_OP_NB] = {
[CC_OP_BMILGQ] = "BMILGQ",
[CC_OP_POPCNT] = "POPCNT",
[CC_OP_CLR] = "CLR",
};
static void
@ -347,7 +346,6 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
int eflags, i, nb;
char cc_op_name[32];
static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
eflags = cpu_compute_eflags(env);
@ -456,10 +454,16 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
env->dr[6], env->dr[7]);
}
if (flags & CPU_DUMP_CCOP) {
if ((unsigned)env->cc_op < CC_OP_NB)
snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
else
snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
const char *cc_op_name = NULL;
char cc_op_buf[32];
if ((unsigned)env->cc_op < ARRAY_SIZE(cc_op_str)) {
cc_op_name = cc_op_str[env->cc_op];
}
if (cc_op_name == NULL) {
snprintf(cc_op_buf, sizeof(cc_op_buf), "[%d]", env->cc_op);
cc_op_name = cc_op_buf;
}
#ifdef TARGET_X86_64
if (env->hflags & HF_CS64_MASK) {
qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%s\n",

@ -46,6 +46,9 @@
#include "cpu-internal.h"
static void x86_cpu_realizefn(DeviceState *dev, Error **errp);
static void x86_cpu_get_supported_cpuid(uint32_t func, uint32_t index,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
/* Helpers for building CPUID[2] descriptors: */
@ -898,6 +901,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_SGX_12_0_EAX_FEATURES 0
#define TCG_SGX_12_0_EBX_FEATURES 0
#define TCG_SGX_12_1_EAX_FEATURES 0
#define TCG_24_0_EBX_FEATURES 0
#if defined CONFIG_USER_ONLY
#define CPUID_8000_0008_EBX_KERNEL_FEATURES (CPUID_8000_0008_EBX_IBPB | \
@ -1132,7 +1136,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"avx-vnni-int8", "avx-ne-convert", NULL, NULL,
"amx-complex", NULL, "avx-vnni-int16", NULL,
NULL, NULL, "prefetchiti", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "avx10",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
@ -1163,6 +1167,20 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.tcg_features = TCG_7_2_EDX_FEATURES,
},
[FEAT_24_0_EBX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
[16] = "avx10-128",
[17] = "avx10-256",
[18] = "avx10-512",
},
.cpuid = {
.eax = 0x24,
.needs_ecx = true, .ecx = 0,
.reg = R_EBX,
},
.tcg_features = TCG_24_0_EBX_FEATURES,
},
[FEAT_8000_0007_EDX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@ -1220,13 +1238,35 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "sbpb",
"ibpb-brtype", NULL, NULL, NULL,
"eraps", NULL, NULL, "sbpb",
"ibpb-brtype", "srso-no", "srso-user-kernel-no", NULL,
},
.cpuid = { .eax = 0x80000021, .reg = R_EAX, },
.tcg_features = 0,
.unmigratable_flags = 0,
},
[FEAT_8000_0021_EBX] = {
.type = CPUID_FEATURE_WORD,
.cpuid = { .eax = 0x80000021, .reg = R_EBX, },
.tcg_features = 0,
.unmigratable_flags = 0,
},
[FEAT_8000_0022_EAX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
"perfmon-v2", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid = { .eax = 0x80000022, .reg = R_EAX, },
.tcg_features = 0,
.unmigratable_flags = 0,
},
[FEAT_XSAVE] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@ -1296,7 +1336,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.needs_ecx = true, .ecx = 0,
.reg = R_EAX,
},
.tcg_features = ~0U,
.tcg_features = XSTATE_FP_MASK | XSTATE_SSE_MASK |
XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
XSTATE_PKRU_MASK,
.migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
@ -1309,7 +1351,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.needs_ecx = true, .ecx = 0,
.reg = R_EDX,
},
.tcg_features = ~0U,
.tcg_features = 0U,
},
/*Below are MSR exposed features*/
[FEAT_ARCH_CAPABILITIES] = {
@ -1745,6 +1787,22 @@ static FeatureDep feature_dependencies[] = {
.from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX },
.to = { FEAT_SGX_12_1_EAX, ~0ull },
},
{
.from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_128 },
.to = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_256 },
},
{
.from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_256 },
.to = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_512 },
},
{
.from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_VL_MASK },
.to = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
},
{
.from = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
.to = { FEAT_24_0_EBX, ~0ull },
},
};
typedef struct X86RegisterInfo32 {
@ -1965,6 +2023,7 @@ typedef struct X86CPUDefinition {
int family;
int model;
int stepping;
uint8_t avx10_version;
FeatureWordArray features;
const char *model_id;
const CPUCaches *const cache_info;
@ -4344,6 +4403,23 @@ static const X86CPUDefinition builtin_x86_defs[] = {
.model_id = "Intel Xeon Processor (GraniteRapids)",
.versions = (X86CPUVersionDefinition[]) {
{ .version = 1 },
{
.version = 2,
.props = (PropValue[]) {
{ "ss", "on" },
{ "tsc-adjust", "on" },
{ "cldemote", "on" },
{ "movdiri", "on" },
{ "movdir64b", "on" },
{ "avx10", "on" },
{ "avx10-128", "on" },
{ "avx10-256", "on" },
{ "avx10-512", "on" },
{ "avx10-version", "1" },
{ "stepping", "1" },
{ /* end of list */ }
}
},
{ /* end of list */ },
},
},
@ -5226,7 +5302,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
CPUID_8000_0008_EBX_STIBP_ALWAYS_ON |
CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD,
.features[FEAT_8000_0021_EAX] =
CPUID_8000_0021_EAX_No_NESTED_DATA_BP |
CPUID_8000_0021_EAX_NO_NESTED_DATA_BP |
CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING |
CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE |
CPUID_8000_0021_EAX_AUTO_IBRS,
@ -5816,7 +5892,7 @@ static void x86_cpu_parse_featurestr(const char *typename, char *features,
}
}
static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose);
/* Build a list with the name of all features on a feature word array */
static void x86_cpu_list_feature_names(FeatureWordArray features,
@ -6307,6 +6383,9 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
*/
object_property_set_str(OBJECT(cpu), "vendor", def->vendor, &error_abort);
object_property_set_uint(OBJECT(cpu), "avx10-version", def->avx10_version,
&error_abort);
x86_cpu_apply_version_props(cpu, model);
/*
@ -6835,6 +6914,16 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
break;
}
case 0x24: {
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && count == 0) {
*ebx = env->features[FEAT_24_0_EBX] | env->avx10_version;
}
break;
}
case 0x40000000:
/*
* CPUID code in kvm_arch_init_vcpu() ignores stuff
@ -7010,6 +7099,16 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*edx = 0;
}
break;
case 0x80000022:
*eax = *ebx = *ecx = *edx = 0;
/* AMD Extended Performance Monitoring and Debug */
if (kvm_enabled() && cpu->enable_pmu &&
(env->features[FEAT_8000_0022_EAX] & CPUID_8000_0022_EAX_PERFMON_V2)) {
*eax |= CPUID_8000_0022_EAX_PERFMON_V2;
*ebx |= kvm_arch_get_supported_cpuid(cs->kvm_state, index, count,
R_EBX) & 0xf;
}
break;
case 0xC0000000:
*eax = env->cpuid_xlevel2;
*ebx = 0;
@ -7043,8 +7142,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
break;
case 0x80000021:
*eax = *ebx = *ecx = *edx = 0;
*eax = env->features[FEAT_8000_0021_EAX];
*ebx = *ecx = *edx = 0;
*ebx = env->features[FEAT_8000_0021_EBX];
break;
default:
/* reserved values: zero */
@ -7067,6 +7167,23 @@ static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env)
#endif
}
static bool cpuid_has_xsave_feature(CPUX86State *env, const ExtSaveArea *esa)
{
if (!esa->size) {
return false;
}
if (env->features[esa->feature] & esa->bits) {
return true;
}
if (esa->feature == FEAT_7_0_EBX && esa->bits == CPUID_7_0_EBX_AVX512F
&& (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10)) {
return true;
}
return false;
}
static void x86_cpu_reset_hold(Object *obj, ResetType type)
{
CPUState *cs = CPU(obj);
@ -7175,7 +7292,7 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type)
if (!((1 << i) & CPUID_XSTATE_XCR0_MASK)) {
continue;
}
if (env->features[esa->feature] & esa->bits) {
if (cpuid_has_xsave_feature(env, esa)) {
xcr0 |= 1ull << i;
}
}
@ -7313,7 +7430,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
mask = 0;
for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if (env->features[esa->feature] & esa->bits) {
if (cpuid_has_xsave_feature(env, esa)) {
mask |= (1ULL << i);
}
}
@ -7406,6 +7523,12 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
~env->user_features[w] &
~feature_word_info[w].no_autoenable_flags;
}
if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && !env->avx10_version) {
uint32_t eax, ebx, ecx, edx;
x86_cpu_get_supported_cpuid(0x24, 0, &eax, &ebx, &ecx, &edx);
env->avx10_version = ebx & 0xff;
}
}
for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
@ -7469,6 +7592,11 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
}
/* Advanced Vector Extensions 10 (AVX10) requires CPUID[0x24] */
if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) {
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x24);
}
/* SVM requires CPUID[0x8000000A] */
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
@ -7512,13 +7640,17 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
* Finishes initialization of CPUID data, filters CPU feature
* words based on host availability of each feature.
*
* Returns: 0 if all flags are supported by the host, non-zero otherwise.
* Returns: true if any flag is not supported by the host, false otherwise.
*/
static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose)
{
CPUX86State *env = &cpu->env;
FeatureWord w;
const char *prefix = NULL;
bool have_filtered_features;
uint32_t eax_0, ebx_0, ecx_0, edx_0;
uint32_t eax_1, ebx_1, ecx_1, edx_1;
if (verbose) {
prefix = accel_uses_host_cpuid()
@ -7540,13 +7672,10 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
*/
if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
kvm_enabled()) {
uint32_t eax_0, ebx_0, ecx_0, edx_0_unused;
uint32_t eax_1, ebx_1, ecx_1_unused, edx_1_unused;
x86_cpu_get_supported_cpuid(0x14, 0,
&eax_0, &ebx_0, &ecx_0, &edx_0_unused);
&eax_0, &ebx_0, &ecx_0, &edx_0);
x86_cpu_get_supported_cpuid(0x14, 1,
&eax_1, &ebx_1, &ecx_1_unused, &edx_1_unused);
&eax_1, &ebx_1, &ecx_1, &edx_1);
if (!eax_0 ||
((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
@ -7566,6 +7695,28 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
}
}
have_filtered_features = x86_cpu_have_filtered_features(cpu);
if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) {
x86_cpu_get_supported_cpuid(0x24, 0,
&eax_0, &ebx_0, &ecx_0, &edx_0);
uint8_t version = ebx_0 & 0xff;
if (version < env->avx10_version) {
if (prefix) {
warn_report("%s: avx10.%d. Adjust to avx10.%d",
prefix, env->avx10_version, version);
}
env->avx10_version = version;
have_filtered_features = true;
}
} else if (env->avx10_version && prefix) {
warn_report("%s: avx10.%d.", prefix, env->avx10_version);
have_filtered_features = true;
}
return have_filtered_features;
}
static void x86_cpu_hyperv_realize(X86CPU *cpu)
@ -7663,14 +7814,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
}
}
x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
error_setg(&local_err,
accel_uses_host_cpuid() ?
if (x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid)) {
if (cpu->enforce_cpuid) {
error_setg(&local_err,
accel_uses_host_cpuid() ?
"Host doesn't support requested features" :
"TCG doesn't support requested features");
goto out;
goto out;
}
}
/* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
@ -7985,6 +8136,26 @@ static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc,
static void x86_cpu_post_initfn(Object *obj)
{
static bool first = true;
uint64_t supported_xcr0;
int i;
if (first) {
first = false;
supported_xcr0 =
((uint64_t) x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_HI) << 32) |
x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_LO);
for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
ExtSaveArea *esa = &x86_ext_save_areas[i];
if (!(supported_xcr0 & (1 << i))) {
esa->size = 0;
}
}
}
accel_cpu_instance_init(CPU(obj));
}
@ -8329,6 +8500,7 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
DEFINE_PROP_UINT8("avx10-version", X86CPU, env.avx10_version, 0),
DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor),
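
The avx10-version property added above lands in bits 7:0 of CPUID.24.EBX, next to the vector-length bits 16-18. As a rough guest-side sketch of decoding the new leaf (probe_avx10 is a made-up helper; the bit positions follow the CPUID_24_0_EBX_* definitions in cpu.h below, and __get_cpuid_count is GCC's cpuid.h wrapper):

#include <cpuid.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: probe the AVX10 version and maximum vector length the way a
 * guest would, using the leaves wired up in this series. */
static bool probe_avx10(uint8_t *version, unsigned *max_vl)
{
    uint32_t eax, ebx, ecx, edx;

    /* AVX10 enumeration is gated on CPUID[EAX=7,ECX=1].EDX bit 19. */
    if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx) ||
        !(edx & (1u << 19))) {
        return false;
    }
    if (!__get_cpuid_count(0x24, 0, &eax, &ebx, &ecx, &edx)) {
        return false;
    }
    *version = ebx & 0xff;                  /* converged version, bits 7:0 */
    *max_vl = (ebx & (1u << 18)) ? 512 :    /* avx10-512 */
              (ebx & (1u << 17)) ? 256 :    /* avx10-256 */
              (ebx & (1u << 16)) ? 128 : 0; /* avx10-128 */
    return true;
}

int main(void)
{
    uint8_t version;
    unsigned vl;

    if (probe_avx10(&version, &vl)) {
        printf("AVX10.%u, max vector length %u bits\n", version, vl);
    }
    return 0;
}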

@ -24,6 +24,7 @@
#include "cpu-qom.h"
#include "kvm/hyperv-proto.h"
#include "exec/cpu-defs.h"
#include "exec/memop.h"
#include "hw/i386/topology.h"
#include "qapi/qapi-types-common.h"
#include "qemu/cpu-float.h"
@ -634,6 +635,8 @@ typedef enum FeatureWord {
FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */
FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */
FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */
FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
@ -662,6 +665,7 @@ typedef enum FeatureWord {
FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */
FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */
FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */
FEAT_24_0_EBX, /* CPUID[EAX=0x24,ECX=0].EBX */
FEATURE_WORDS,
} FeatureWord;
@ -972,6 +976,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8)
/* PREFETCHIT0/1 Instructions */
#define CPUID_7_1_EDX_PREFETCHITI (1U << 14)
/* Support for Advanced Vector Extensions 10 */
#define CPUID_7_1_EDX_AVX10 (1U << 19)
/* Flexible return and event delivery (FRED) */
#define CPUID_7_1_EAX_FRED (1U << 17)
/* Load into IA32_KERNEL_GS_BASE (LKGS) */
@ -988,6 +994,17 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Packets which contain IP payload have LIP values */
#define CPUID_14_0_ECX_LIP (1U << 31)
/* AVX10 128-bit vector support is present */
#define CPUID_24_0_EBX_AVX10_128 (1U << 16)
/* AVX10 256-bit vector support is present */
#define CPUID_24_0_EBX_AVX10_256 (1U << 17)
/* AVX10 512-bit vector support is present */
#define CPUID_24_0_EBX_AVX10_512 (1U << 18)
/* AVX10 vector length support mask */
#define CPUID_24_0_EBX_AVX10_VL_MASK (CPUID_24_0_EBX_AVX10_128 | \
CPUID_24_0_EBX_AVX10_256 | \
CPUID_24_0_EBX_AVX10_512)
/* RAS Features */
#define CPUID_8000_0007_EBX_OVERFLOW_RECOV (1U << 0)
#define CPUID_8000_0007_EBX_SUCCOR (1U << 1)
@ -1014,13 +1031,32 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28)
/* Processor ignores nested data breakpoints */
#define CPUID_8000_0021_EAX_No_NESTED_DATA_BP (1U << 0)
#define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0)
/* LFENCE is always serializing */
#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
/* Null Selector Clears Base */
#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
/* Automatic IBRS */
#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
/* Enhanced Return Address Predictor Security */
#define CPUID_8000_0021_EAX_ERAPS (1U << 24)
/* Selective Branch Predictor Barrier */
#define CPUID_8000_0021_EAX_SBPB (1U << 27)
/* IBPB includes branch type prediction flushing */
#define CPUID_8000_0021_EAX_IBPB_BRTYPE (1U << 28)
/* Not vulnerable to Speculative Return Stack Overflow */
#define CPUID_8000_0021_EAX_SRSO_NO (1U << 29)
/* Not vulnerable to SRSO at the user-kernel boundary */
#define CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO (1U << 30)
/*
* Return Address Predictor size. RapSize x 8 is the minimum number of
* CALL instructions software needs to execute to flush the RAP.
*/
#define CPUID_8000_0021_EBX_RAPSIZE (8U << 16)
/* Performance Monitoring Version 2 */
#define CPUID_8000_0022_EAX_PERFMON_V2 (1U << 0)
#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC (1U << 1)
@ -1278,14 +1314,14 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
* are only needed for conditional branches.
*/
typedef enum {
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
CC_OP_ADOX, /* CC_SRC2 = O, CC_SRC = rest. */
CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
CC_OP_CLR, /* Z and P set, all other flags clear. */
CC_OP_EFLAGS = 0, /* all cc are explicitly computed, CC_SRC = flags */
CC_OP_ADCX = 1, /* CC_DST = C, CC_SRC = rest. */
CC_OP_ADOX = 2, /* CC_SRC2 = O, CC_SRC = rest. */
CC_OP_ADCOX = 3, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
/* Low 2 bits = MemOp constant for the size */
#define CC_OP_FIRST_BWLQ CC_OP_MULB
CC_OP_MULB = 4, /* modify all flags, C, O = (CC_SRC != 0) */
CC_OP_MULW,
CC_OP_MULL,
CC_OP_MULQ,
@ -1355,10 +1391,24 @@ typedef enum {
CC_OP_POPCNTL__,
CC_OP_POPCNTQ__,
CC_OP_POPCNT = sizeof(target_ulong) == 8 ? CC_OP_POPCNTQ__ : CC_OP_POPCNTL__,
#define CC_OP_LAST_BWLQ CC_OP_POPCNTQ__
CC_OP_NB,
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
} CCOp;
QEMU_BUILD_BUG_ON(CC_OP_NB >= 128);
/* See X86DecodedInsn.cc_op, using int8_t. */
QEMU_BUILD_BUG_ON(CC_OP_DYNAMIC > INT8_MAX);
static inline MemOp cc_op_size(CCOp op)
{
MemOp size = op & 3;
QEMU_BUILD_BUG_ON(CC_OP_FIRST_BWLQ & 3);
assert(op >= CC_OP_FIRST_BWLQ && op <= CC_OP_LAST_BWLQ);
assert(size <= MO_TL);
return size;
}
typedef struct SegmentCache {
uint32_t selector;
@ -1918,6 +1968,8 @@ typedef struct CPUArchState {
uint32_t cpuid_vendor3;
uint32_t cpuid_version;
FeatureWordArray features;
/* AVX10 version */
uint8_t avx10_version;
/* Features that were explicitly enabled/disabled */
FeatureWordArray user_features;
uint32_t cpuid_model[12];
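
The CCOp renumbering in the hunk above is what makes cc_op_size() a simple mask: every B/W/L/Q group now starts four-aligned at CC_OP_MULB, so the low two bits of the enum value are exactly the MemOp size constant. A minimal standalone model of that invariant (the names mirror QEMU's, but the values are reduced to plain C):

#include <assert.h>

/* MemOp size constants as in QEMU: MO_8=0, MO_16=1, MO_32=2, MO_64=3. */
enum { MO_8, MO_16, MO_32, MO_64 };

/* Each B/W/L/Q group occupies four consecutive, 4-aligned values. */
enum {
    CC_OP_MULB = 4, CC_OP_MULW, CC_OP_MULL, CC_OP_MULQ,
    CC_OP_ADDB,     CC_OP_ADDW, CC_OP_ADDL, CC_OP_ADDQ,
};

static int cc_op_size_model(int op) { return op & 3; }

int main(void)
{
    assert(cc_op_size_model(CC_OP_MULW) == MO_16);
    assert(cc_op_size_model(CC_OP_ADDQ) == MO_64);
    /* The 4-alignment of the first group is what
     * QEMU_BUILD_BUG_ON(CC_OP_FIRST_BWLQ & 3) asserts above. */
    assert((CC_OP_MULB & 3) == 0);
    return 0;
}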

@ -1,5 +1,6 @@
DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
DEF_HELPER_FLAGS_3(cc_compute_nz, TCG_CALL_NO_RWG_SE, tl, tl, tl, int)
DEF_HELPER_3(write_eflags, void, env, tl, i32)
DEF_HELPER_1(read_eflags, tl, env)

@ -42,7 +42,7 @@ static uint32_t host_cpu_phys_bits(void)
return host_phys_bits;
}
static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu)
static void host_cpu_adjust_phys_bits(X86CPU *cpu)
{
uint32_t host_phys_bits = host_cpu_phys_bits();
uint32_t phys_bits = cpu->phys_bits;
@ -66,7 +66,7 @@ static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu)
}
}
return phys_bits;
cpu->phys_bits = phys_bits;
}
bool host_cpu_realizefn(CPUState *cs, Error **errp)
@ -75,17 +75,7 @@ bool host_cpu_realizefn(CPUState *cs, Error **errp)
CPUX86State *env = &cpu->env;
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
uint32_t phys_bits = host_cpu_adjust_phys_bits(cpu);
if (phys_bits &&
(phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
phys_bits < 32)) {
error_setg(errp, "phys-bits should be between 32 and %u "
" (but is %u)",
TARGET_PHYS_ADDR_SPACE_BITS, phys_bits);
return false;
}
cpu->phys_bits = phys_bits;
host_cpu_adjust_phys_bits(cpu);
}
return true;
}

@ -21,28 +21,38 @@
*/
#include "qemu/osdep.h"
#include "qemu/cpuid.h"
#include "host/cpuinfo.h"
#include "cpu.h"
#include "x86.h"
#include "vmx.h"
#include "sysemu/hvf.h"
#include "hvf-i386.h"
static bool xgetbv(uint32_t cpuid_ecx, uint32_t idx, uint64_t *xcr)
static bool cached_xcr0;
static uint64_t supported_xcr0;
static void cache_host_xcr0(void)
{
uint32_t xcrl, xcrh;
if (cpuid_ecx & CPUID_EXT_OSXSAVE) {
/*
* The xgetbv instruction is not available to older versions of
* the assembler, so we encode the instruction manually.
*/
asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (idx));
*xcr = (((uint64_t)xcrh) << 32) | xcrl;
return true;
if (cached_xcr0) {
return;
}
return false;
if (cpuinfo & CPUINFO_OSXSAVE) {
uint64_t host_xcr0 = xgetbv_low(0);
/* Only show xcr0 bits corresponding to usable features. */
supported_xcr0 = host_xcr0 & (XSTATE_FP_MASK |
XSTATE_SSE_MASK | XSTATE_YMM_MASK |
XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK |
XSTATE_Hi16_ZMM_MASK);
if ((supported_xcr0 & (XSTATE_FP_MASK | XSTATE_SSE_MASK)) !=
(XSTATE_FP_MASK | XSTATE_SSE_MASK)) {
supported_xcr0 = 0;
}
}
cached_xcr0 = true;
}
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
@ -51,6 +61,7 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
uint64_t cap;
uint32_t eax, ebx, ecx, edx;
cache_host_xcr0();
host_cpuid(func, idx, &eax, &ebx, &ecx, &edx);
switch (func) {
@ -66,7 +77,8 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
ecx &= CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 |
CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID |
CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_MOVBE |
CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
CPUID_EXT_POPCNT | CPUID_EXT_AES |
(supported_xcr0 ? CPUID_EXT_XSAVE : 0) |
CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND;
ecx |= CPUID_EXT_HYPERVISOR;
break;
@ -107,16 +119,14 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
eax = 0;
break;
case 0xD:
if (!supported_xcr0 ||
(idx > 1 && !(supported_xcr0 & (1 << idx)))) {
eax = ebx = ecx = edx = 0;
break;
}
if (idx == 0) {
uint64_t host_xcr0;
if (xgetbv(ecx, 0, &host_xcr0)) {
uint64_t supp_xcr0 = host_xcr0 & (XSTATE_FP_MASK |
XSTATE_SSE_MASK | XSTATE_YMM_MASK |
XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK |
XSTATE_Hi16_ZMM_MASK);
eax &= supp_xcr0;
}
eax = supported_xcr0;
} else if (idx == 1) {
hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
eax &= CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1;
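
The rewrite above drops the hand-encoded xgetbv in favor of the cpuinfo framework's xgetbv_low(). For reference, a sketch of reading XCR0 with the actual mnemonic; it is only valid on hosts where CPUID reports OSXSAVE, which is what the CPUINFO_OSXSAVE check guarantees:

#include <stdint.h>

/* Sketch: XGETBV with ECX=0 returns XCR0 in EDX:EAX. */
static uint64_t read_xcr0(void)
{
    uint32_t lo, hi;
    __asm__ volatile("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0));
    return ((uint64_t)hi << 32) | lo;
}

Modern assemblers accept the mnemonic directly, which is why the manual ".byte 0x0f, 0x01, 0xd0" encoding removed above is no longer needed.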

@ -143,10 +143,6 @@ static void kvm_cpu_xsave_init(void)
if (!esa->size) {
continue;
}
if ((x86_cpu_get_supported_feature_word(NULL, esa->feature) & esa->bits)
!= esa->bits) {
continue;
}
host_cpuid(0xd, i, &eax, &ebx, &ecx, &edx);
if (eax != 0) {
assert(esa->size == eax);

@ -1923,7 +1923,8 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
case 0x7:
case 0x14:
case 0x1d:
case 0x1e: {
case 0x1e:
case 0x24: {
uint32_t times;
c->function = i;

@ -22,41 +22,6 @@
#include "exec/helper-proto.h"
#include "helper-tcg.h"
const uint8_t parity_table[256] = {
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
#define SHIFT 0
#include "cc_helper_template.h.inc"
#undef SHIFT
@ -95,6 +60,19 @@ static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
}
target_ulong helper_cc_compute_nz(target_ulong dst, target_ulong src1,
int op)
{
if (CC_OP_HAS_EFLAGS(op)) {
return ~src1 & CC_Z;
} else {
MemOp size = cc_op_size(op);
target_ulong mask = MAKE_64BIT_MASK(0, 8 << size);
return dst & mask;
}
}
target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
target_ulong src2, int op)
{
@ -104,8 +82,6 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
case CC_OP_EFLAGS:
return src1;
case CC_OP_CLR:
return CC_Z | CC_P;
case CC_OP_POPCNT:
return dst ? 0 : CC_Z;
@ -243,7 +219,6 @@ target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
case CC_OP_LOGICW:
case CC_OP_LOGICL:
case CC_OP_LOGICQ:
case CC_OP_CLR:
case CC_OP_POPCNT:
return 0;
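
The new helper_cc_compute_nz has a simple contract: the returned value is zero exactly when ZF is set. For EFLAGS-style cc_ops it extracts the inverted Z bit from CC_SRC; otherwise it masks CC_DST down to the operand width. A plain-C model of that contract (the function names here are illustrative):

#include <assert.h>
#include <stdint.h>

#define CC_Z 0x0040 /* x86 ZF mask */

/* EFLAGS-style ops: Z comes from the flags word itself. */
static uint64_t compute_nz_eflags(uint64_t src1)
{
    return ~src1 & CC_Z;
}

/* Sized ops: Z is the non-zero status of the N-bit result. */
static uint64_t compute_nz_sized(uint64_t dst, unsigned data_bits)
{
    uint64_t mask = (data_bits == 64) ? ~0ull : (1ull << data_bits) - 1;
    return dst & mask;
}

int main(void)
{
    assert(compute_nz_eflags(CC_Z) == 0);    /* ZF set   -> zero */
    assert(compute_nz_eflags(0) != 0);       /* ZF clear -> non-zero */
    assert(compute_nz_sized(0x100, 8) == 0); /* 8-bit result is 0 */
    assert(compute_nz_sized(0x1ff, 8) != 0);
    return 0;
}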

@ -22,12 +22,17 @@
#if DATA_BITS == 8
#define SUFFIX b
#define DATA_TYPE uint8_t
#define WIDER_TYPE uint32_t
#elif DATA_BITS == 16
#define SUFFIX w
#define DATA_TYPE uint16_t
#define WIDER_TYPE uint32_t
#elif DATA_BITS == 32
#define SUFFIX l
#define DATA_TYPE uint32_t
#if HOST_LONG_BITS >= 64
#define WIDER_TYPE uint64_t
#endif
#elif DATA_BITS == 64
#define SUFFIX q
#define DATA_TYPE uint64_t
@ -39,18 +44,18 @@
/* dynamic flags computation */
static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
static uint32_t glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
DATA_TYPE src2 = dst - src1;
cf = dst < src1;
pf = parity_table[(uint8_t)dst];
pf = compute_pf(dst);
af = (dst ^ src1 ^ src2) & CC_A;
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
@ -58,39 +63,54 @@ static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
return dst < src1;
}
static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
static uint32_t glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
DATA_TYPE src3)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
#ifdef WIDER_TYPE
WIDER_TYPE src13 = (WIDER_TYPE) src1 + (WIDER_TYPE) src3;
DATA_TYPE src2 = dst - src13;
cf = dst < src13;
#else
DATA_TYPE src2 = dst - src1 - src3;
cf = (src3 ? dst <= src1 : dst < src1);
pf = parity_table[(uint8_t)dst];
#endif
pf = compute_pf(dst);
af = (dst ^ src1 ^ src2) & 0x10;
zf = (dst == 0) << 6;
sf = lshift(dst, 8 - DATA_BITS) & 0x80;
of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
DATA_TYPE src3)
{
#ifdef WIDER_TYPE
WIDER_TYPE src13 = (WIDER_TYPE) src1 + (WIDER_TYPE) src3;
return dst < src13;
#else
return src3 ? dst <= src1 : dst < src1;
#endif
}
static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
static uint32_t glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
DATA_TYPE src1 = dst + src2;
cf = src1 < src2;
pf = parity_table[(uint8_t)dst];
pf = compute_pf(dst);
af = (dst ^ src1 ^ src2) & CC_A;
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
@ -100,86 +120,102 @@ static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
return src1 < src2;
}
static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
static uint32_t glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
DATA_TYPE src3)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
#ifdef WIDER_TYPE
WIDER_TYPE src23 = (WIDER_TYPE) src2 + (WIDER_TYPE) src3;
DATA_TYPE src1 = dst + src23;
cf = src1 < src23;
#else
DATA_TYPE src1 = dst + src2 + src3;
cf = (src3 ? src1 <= src2 : src1 < src2);
pf = parity_table[(uint8_t)dst];
#endif
pf = compute_pf(dst);
af = (dst ^ src1 ^ src2) & 0x10;
zf = (dst == 0) << 6;
sf = lshift(dst, 8 - DATA_BITS) & 0x80;
of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
DATA_TYPE src3)
{
#ifdef WIDER_TYPE
WIDER_TYPE src23 = (WIDER_TYPE) src2 + (WIDER_TYPE) src3;
DATA_TYPE src1 = dst + src23;
return src1 < src23;
#else
DATA_TYPE src1 = dst + src2 + src3;
return (src3 ? src1 <= src2 : src1 < src2);
#endif
}
static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
static uint32_t glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
cf = 0;
pf = parity_table[(uint8_t)dst];
pf = compute_pf(dst);
af = 0;
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = 0;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
static uint32_t glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
DATA_TYPE src2;
cf = src1;
src1 = dst - 1;
src2 = 1;
pf = parity_table[(uint8_t)dst];
pf = compute_pf(dst);
af = (dst ^ src1 ^ src2) & CC_A;
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = (dst == SIGN_MASK) * CC_O;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
static uint32_t glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
DATA_TYPE src2;
cf = src1;
src1 = dst + 1;
src2 = 1;
pf = parity_table[(uint8_t)dst];
pf = compute_pf(dst);
af = (dst ^ src1 ^ src2) & CC_A;
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = (dst == SIGN_MASK - 1) * CC_O;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
static uint32_t glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
cf = (src1 >> (DATA_BITS - 1)) & CC_C;
pf = parity_table[(uint8_t)dst];
pf = compute_pf(dst);
af = 0; /* undefined */
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
/* of is defined iff shift count == 1 */
of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
@ -187,39 +223,39 @@ static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
return (src1 >> (DATA_BITS - 1)) & CC_C;
}
static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
static uint32_t glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
cf = src1 & 1;
pf = parity_table[(uint8_t)dst];
pf = compute_pf(dst);
af = 0; /* undefined */
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
/* of is defined iff shift count == 1 */
of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
/* NOTE: we compute the flags like the P4. On older CPUs, only OF and
   CF are modified and it is slower to do that. Note as well that we
   don't truncate SRC1 to DATA_TYPE when computing the carry. */
static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1)
static uint32_t glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
cf = (src1 != 0);
pf = parity_table[(uint8_t)dst];
pf = compute_pf(dst);
af = 0; /* undefined */
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = cf * CC_O;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
static uint32_t glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
cf = (src1 == 0);
pf = 0; /* undefined */
@ -227,7 +263,7 @@ static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = 0;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
@ -237,7 +273,7 @@ static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
static int glue(compute_all_blsi, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
{
int cf, pf, af, zf, sf, of;
uint32_t cf, pf, af, zf, sf, of;
cf = (src1 != 0);
pf = 0; /* undefined */
@ -245,7 +281,7 @@ static int glue(compute_all_blsi, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
zf = (dst == 0) * CC_Z;
sf = lshift(dst, 8 - DATA_BITS) & CC_S;
of = 0;
return cf | pf | af | zf | sf | of;
return cf + pf + af + zf + sf + of;
}
static int glue(compute_c_blsi, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
@ -258,3 +294,4 @@ static int glue(compute_c_blsi, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
#undef DATA_TYPE
#undef DATA_MASK
#undef SUFFIX
#undef WIDER_TYPE
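
The WIDER_TYPE machinery above removes the data-dependent conditional from the ADC/SBB carry: src1 + src3 is formed in a type too wide to wrap, so a single unsigned compare yields CF. A self-contained brute-force check that the old and new formulations agree for 8-bit ADC:

#include <assert.h>
#include <stdint.h>

/* Carry out of dst = src1 + src2 + src3, where src3 is the incoming
 * carry (0 or 1) and dst has already wrapped to 8 bits. */
static int adc_cf_old(uint8_t dst, uint8_t src1, uint8_t src3)
{
    return src3 ? dst <= src1 : dst < src1;
}

static int adc_cf_wide(uint8_t dst, uint8_t src1, uint8_t src3)
{
    uint32_t src13 = (uint32_t)src1 + src3; /* cannot wrap in 32 bits */
    return dst < src13;
}

int main(void)
{
    for (unsigned s1 = 0; s1 < 256; s1++) {
        for (unsigned s2 = 0; s2 < 256; s2++) {
            for (unsigned c = 0; c <= 1; c++) {
                uint8_t dst = s1 + s2 + c;
                assert(adc_cf_old(dst, s1, c) == adc_cf_wide(dst, s1, c));
            }
        }
    }
    return 0;
}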

@ -345,9 +345,9 @@ static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry,
[1] = X86_OP_ENTRYw(RDxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3),
[2] = X86_OP_ENTRYr(WRxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3 zextT0),
[3] = X86_OP_ENTRYr(WRxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3 zextT0),
[5] = X86_OP_ENTRY0(LFENCE, cpuid(SSE2) p_00),
[5] = X86_OP_ENTRY0(LFENCE, cpuid(SSE) p_00),
[6] = X86_OP_ENTRY0(MFENCE, cpuid(SSE2) p_00),
[7] = X86_OP_ENTRY0(SFENCE, cpuid(SSE2) p_00),
[7] = X86_OP_ENTRY0(SFENCE, cpuid(SSE) p_00),
};
static const X86OpEntry group15_mem[8] = {
@ -2865,7 +2865,7 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_mov_i32(cpu_cc_op, decode.cc_op_dynamic);
}
set_cc_op(s, decode.cc_op);
cc_live = cc_op_live[decode.cc_op];
cc_live = cc_op_live(decode.cc_op);
} else {
cc_live = 0;
}

@ -1452,19 +1452,12 @@ static void gen_bt_flags(DisasContext *s, X86DecodedInsn *decode, TCGv src, TCGv
* C is the result of the test, Z is unchanged, and the others
* are all undefined.
*/
switch (s->cc_op) {
case CC_OP_DYNAMIC:
case CC_OP_CLR:
case CC_OP_EFLAGS:
case CC_OP_ADCX:
case CC_OP_ADOX:
case CC_OP_ADCOX:
if (s->cc_op == CC_OP_DYNAMIC || CC_OP_HAS_EFLAGS(s->cc_op)) {
/* Generate EFLAGS and replace the C bit. */
cf = tcg_temp_new();
tcg_gen_setcond_tl(TCG_COND_TSTNE, cf, src, mask);
prepare_update_cf(decode, s, cf);
break;
default:
} else {
/*
* Z was going to be computed from the non-zero status of CC_DST.
* We can get that same Z value (and the new C value) by leaving
@ -1473,9 +1466,8 @@ static void gen_bt_flags(DisasContext *s, X86DecodedInsn *decode, TCGv src, TCGv
*/
decode->cc_src = tcg_temp_new();
decode->cc_dst = cpu_cc_dst;
decode->cc_op = ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB;
decode->cc_op = CC_OP_SARB + cc_op_size(s->cc_op);
tcg_gen_shr_tl(decode->cc_src, src, s->T1);
break;
}
}
@ -3354,7 +3346,8 @@ static bool gen_eflags_adcox(DisasContext *s, X86DecodedInsn *decode, bool want_
* bit, we might as well fish CF out of EFLAGS and save a shift.
*/
if (want_carry && (!need_flags || s->cc_op == CC_OP_SHLB + MO_TL)) {
tcg_gen_shri_tl(decode->cc_dst, cpu_cc_src, (8 << (s->cc_op - CC_OP_SHLB)) - 1);
MemOp size = cc_op_size(s->cc_op);
tcg_gen_shri_tl(decode->cc_dst, cpu_cc_src, (8 << size) - 1);
got_cf = true;
}
gen_mov_eflags(s, decode->cc_src);
@ -3784,13 +3777,13 @@ static void gen_shift_dynamic_flags(DisasContext *s, X86DecodedInsn *decode, TCG
decode->cc_op_dynamic = tcg_temp_new_i32();
assert(decode->cc_dst == s->T0);
if (cc_op_live[s->cc_op] & USES_CC_DST) {
if (cc_op_live(s->cc_op) & USES_CC_DST) {
decode->cc_dst = tcg_temp_new();
tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_dst, count, tcg_constant_tl(0),
cpu_cc_dst, s->T0);
}
if (cc_op_live[s->cc_op] & USES_CC_SRC) {
if (cc_op_live(s->cc_op) & USES_CC_SRC) {
tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_src, count, tcg_constant_tl(0),
cpu_cc_src, decode->cc_src);
}
@ -4724,7 +4717,8 @@ static void gen_XOR(DisasContext *s, X86DecodedInsn *decode)
decode->op[2].unit == X86_OP_INT &&
decode->op[1].n == decode->op[2].n) {
tcg_gen_movi_tl(s->T0, 0);
decode->cc_op = CC_OP_CLR;
decode->cc_op = CC_OP_EFLAGS;
decode->cc_src = tcg_constant_tl(CC_Z | CC_P);
} else {
MemOp ot = decode->op[1].ot;
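
The gen_XOR change above retires the dedicated CC_OP_CLR: xor r,r always produces 0, and a zero result from a logic op fully determines the flags (ZF and PF set, everything else clear), so CC_OP_EFLAGS with a constant CC_Z | CC_P source is enough. A quick check against the logic-op flag formula (logic_flags8 is an illustrative reduction of compute_all_logic):

#include <assert.h>
#include <stdint.h>

#define CC_P 0x0004
#define CC_Z 0x0040
#define CC_S 0x0080

/* Flags of an 8-bit logic op: CF = OF = AF = 0, ZF/SF from the result,
 * PF set when the low byte has even parity. */
static unsigned logic_flags8(uint8_t dst)
{
    unsigned pf = __builtin_parity(dst) ? 0 : CC_P;
    unsigned zf = (dst == 0) * CC_Z;
    unsigned sf = dst & CC_S;
    return pf + zf + sf;
}

int main(void)
{
    /* xor r,r yields a zero result, hence exactly CC_Z | CC_P. */
    assert(logic_flags8(0) == (CC_Z | CC_P));
    return 0;
}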

@ -21,6 +21,7 @@
#define I386_HELPER_TCG_H
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
/* Maximum instruction code size */
#define TARGET_MAX_INSN_SIZE 16
@ -87,7 +88,10 @@ G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
#endif
/* cc_helper.c */
extern const uint8_t parity_table[256];
static inline unsigned int compute_pf(uint8_t x)
{
return !parity8(x) * CC_P;
}
/* misc_helper.c */
void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask);
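
compute_pf() above replaces the 256-entry parity_table with parity8() from qemu/host-utils.h, which boils down to the compiler's parity builtin. A brute-force sketch confirming the builtin matches the table's definition of PF, namely that PF is set when the low byte contains an even number of 1 bits:

#include <assert.h>
#include <stdint.h>

#define CC_P 0x0004

static unsigned compute_pf_model(uint8_t x)
{
    /* __builtin_parity returns 1 for an odd number of set bits. */
    return !__builtin_parity(x) * CC_P;
}

int main(void)
{
    for (unsigned x = 0; x < 256; x++) {
        unsigned ones = __builtin_popcount(x);
        assert(compute_pf_model(x) == ((ones % 2 == 0) ? CC_P : 0));
    }
    return 0;
}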

@ -237,7 +237,7 @@ void helper_daa(CPUX86State *env)
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
/* well, speed is not an issue here, so we compute the flags by hand */
eflags |= (al == 0) << 6; /* zf */
eflags |= parity_table[al]; /* pf */
eflags |= compute_pf(al);
eflags |= (al & 0x80); /* sf */
CC_SRC = eflags;
CC_OP = CC_OP_EFLAGS;
@ -269,7 +269,7 @@ void helper_das(CPUX86State *env)
env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
/* well, speed is not an issue here, so we compute the flags by hand */
eflags |= (al == 0) << 6; /* zf */
eflags |= parity_table[al]; /* pf */
eflags |= compute_pf(al);
eflags |= (al & 0x80); /* sf */
CC_SRC = eflags;
CC_OP = CC_OP_EFLAGS;

@ -291,7 +291,7 @@ enum {
};
/* Bit set if the global variable is live after setting CC_OP to X. */
static const uint8_t cc_op_live[CC_OP_NB] = {
static const uint8_t cc_op_live_[] = {
[CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
[CC_OP_EFLAGS] = USES_CC_SRC,
[CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
@ -309,10 +309,24 @@ static const uint8_t cc_op_live[CC_OP_NB] = {
[CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
[CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
[CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
[CC_OP_CLR] = 0,
[CC_OP_POPCNT] = USES_CC_DST,
};
static uint8_t cc_op_live(CCOp op)
{
uint8_t result;
assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_));
/*
* Check that the array is fully populated. A zero entry would correspond
* to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS
* as well.
*/
result = cc_op_live_[op];
assert(result);
return result;
}
static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
int dead;
@ -322,7 +336,7 @@ static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
}
/* Discard CC computation that will no longer be used. */
dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
dead = cc_op_live(s->cc_op) & ~cc_op_live(op);
if (dead & USES_CC_DST) {
tcg_gen_discard_tl(cpu_cc_dst);
}
@ -803,17 +817,13 @@ static void gen_mov_eflags(DisasContext *s, TCGv reg)
tcg_gen_mov_tl(reg, cpu_cc_src);
return;
}
if (s->cc_op == CC_OP_CLR) {
tcg_gen_movi_tl(reg, CC_Z | CC_P);
return;
}
dst = cpu_cc_dst;
src1 = cpu_cc_src;
src2 = cpu_cc_src2;
/* Take care to not read values that are not live. */
live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
live = cc_op_live(s->cc_op) & ~USES_CC_SRCT;
dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
if (dead) {
TCGv zero = tcg_constant_tl(0);
@ -883,21 +893,20 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
case CC_OP_SUBB ... CC_OP_SUBQ:
/* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
size = s->cc_op - CC_OP_SUBB;
gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
.reg2 = cpu_cc_src, .use_reg2 = true };
case CC_OP_ADDB ... CC_OP_ADDQ:
/* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
size = s->cc_op - CC_OP_ADDB;
gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
size = cc_op_size(s->cc_op);
tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size);
tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
.reg2 = cpu_cc_src, .use_reg2 = true };
case CC_OP_LOGICB ... CC_OP_LOGICQ:
case CC_OP_CLR:
case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_NEVER };
@ -908,7 +917,7 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
case CC_OP_SHLB ... CC_OP_SHLQ:
/* (CC_SRC >> (DATA_BITS - 1)) & 1 */
size = s->cc_op - CC_OP_SHLB;
size = cc_op_size(s->cc_op);
return gen_prepare_sign_nz(cpu_cc_src, size);
case CC_OP_MULB ... CC_OP_MULQ:
@ -916,11 +925,11 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
.reg = cpu_cc_src };
case CC_OP_BMILGB ... CC_OP_BMILGQ:
size = s->cc_op - CC_OP_BMILGB;
size = cc_op_size(s->cc_op);
return gen_prepare_val_nz(cpu_cc_src, size, true);
case CC_OP_BLSIB ... CC_OP_BLSIQ:
size = s->cc_op - CC_OP_BLSIB;
size = cc_op_size(s->cc_op);
return gen_prepare_val_nz(cpu_cc_src, size, false);
case CC_OP_ADCX:
@ -969,14 +978,10 @@ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
case CC_OP_ADCOX:
return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
.imm = CC_S };
case CC_OP_CLR:
case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_NEVER };
default:
{
MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
return gen_prepare_sign_nz(cpu_cc_dst, size);
}
return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op));
}
}
@ -988,7 +993,7 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
case CC_OP_ADCOX:
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
.no_setcond = true };
case CC_OP_CLR:
case CC_OP_LOGICB ... CC_OP_LOGICQ:
case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_NEVER };
case CC_OP_MULB ... CC_OP_MULQ:
@ -1004,20 +1009,24 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
switch (s->cc_op) {
case CC_OP_DYNAMIC:
gen_compute_eflags(s);
/* FALLTHRU */
case CC_OP_EFLAGS:
case CC_OP_ADCX:
case CC_OP_ADOX:
case CC_OP_ADCOX:
return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
.imm = CC_Z };
case CC_OP_CLR:
return (CCPrepare) { .cond = TCG_COND_ALWAYS };
case CC_OP_DYNAMIC:
gen_update_cc_op(s);
if (!reg) {
reg = tcg_temp_new();
}
gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
default:
{
MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
MemOp size = cc_op_size(s->cc_op);
return gen_prepare_val_nz(cpu_cc_dst, size, true);
}
}
@ -1038,11 +1047,11 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
switch (s->cc_op) {
case CC_OP_SUBB ... CC_OP_SUBQ:
/* We optimize relational operators for the cmp/jcc case. */
size = s->cc_op - CC_OP_SUBB;
size = cc_op_size(s->cc_op);
switch (jcc_op) {
case JCC_BE:
gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
.reg2 = cpu_cc_src, .use_reg2 = true };
break;
@ -1052,8 +1061,8 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
case JCC_LE:
cond = TCG_COND_LE;
fast_jcc_l:
gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN);
tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN);
cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
.reg2 = cpu_cc_src, .use_reg2 = true };
break;
@ -1063,6 +1072,28 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
}
break;
case CC_OP_LOGICB ... CC_OP_LOGICQ:
/* Mostly used for test+jump */
size = s->cc_op - CC_OP_LOGICB;
switch (jcc_op) {
case JCC_BE:
/* CF = 0, becomes jz/je */
jcc_op = JCC_Z;
goto slow_jcc;
case JCC_L:
/* OF = 0, becomes js/jns */
jcc_op = JCC_S;
goto slow_jcc;
case JCC_LE:
/* SF or ZF, becomes signed <= 0 */
tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN);
cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst };
break;
default:
goto slow_jcc;
}
break;
default:
slow_jcc:
/* This actually generates good code for JC, JZ and JS. */
@ -1162,6 +1193,10 @@ static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
CCPrepare cc = gen_prepare_cc(s, b, NULL);
/*
* Note that this must be _after_ gen_prepare_cc, because it
* can change the cc_op from CC_OP_DYNAMIC to CC_OP_EFLAGS!
*/
gen_update_cc_op(s);
if (cc.use_reg2) {
tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
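
The CC_OP_LOGIC fast path added to gen_prepare_cc above leans on flag identities that hold after any logic op (CF = OF = 0): JBE degenerates to JZ, JL to JS, and JLE to a signed compare of the result against zero; this is what lets a TEST+Jcc pair fold into a single comparison on the result. A brute-force check of those identities at 8-bit width:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (unsigned v = 0; v < 256; v++) {
        uint8_t dst = v;
        int zf = (dst == 0);
        int sf = (dst & 0x80) != 0;
        int cf = 0, of = 0; /* fixed for logic ops */

        assert((cf || zf) == zf);                         /* JBE -> JZ */
        assert((sf != of) == sf);                         /* JL  -> JS */
        assert((zf || (sf != of)) == ((int8_t)dst <= 0)); /* JLE -> <= 0 */
    }
    return 0;
}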