mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-10 11:04:58 -06:00
target/i386: Assert !VM86 for x86_64 user-only
For i386-linux-user, we can enter vm86 mode via the vm86(2) syscall. That syscall explicitly returns to 32-bit mode, and the syscall does not exist for a 64-bit x86_64 executable. Since we're adding an accessor macro, pull the value directly out of flags otherwise. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Message-Id: <20210514151342.384376-10-richard.henderson@linaro.org>
This commit is contained in:
parent
0ab011cca0
commit
f8a35846d5
1 changed file with 22 additions and 18 deletions
|
@ -116,7 +116,6 @@ typedef struct DisasContext {
|
||||||
#endif
|
#endif
|
||||||
int addseg; /* non zero if either DS/ES/SS have a non zero base */
|
int addseg; /* non zero if either DS/ES/SS have a non zero base */
|
||||||
int f_st; /* currently unused */
|
int f_st; /* currently unused */
|
||||||
int vm86; /* vm86 mode */
|
|
||||||
int tf; /* TF cpu flag */
|
int tf; /* TF cpu flag */
|
||||||
int jmp_opt; /* use direct block chaining for direct jumps */
|
int jmp_opt; /* use direct block chaining for direct jumps */
|
||||||
int repz_opt; /* optimize jumps within repz instructions */
|
int repz_opt; /* optimize jumps within repz instructions */
|
||||||
|
@ -159,6 +158,11 @@ typedef struct DisasContext {
|
||||||
#define CPL(S) ((S)->cpl)
|
#define CPL(S) ((S)->cpl)
|
||||||
#define IOPL(S) ((S)->iopl)
|
#define IOPL(S) ((S)->iopl)
|
||||||
#endif
|
#endif
|
||||||
|
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
|
||||||
|
#define VM86(S) false
|
||||||
|
#else
|
||||||
|
#define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
|
||||||
|
#endif
|
||||||
|
|
||||||
static void gen_eob(DisasContext *s);
|
static void gen_eob(DisasContext *s);
|
||||||
static void gen_jr(DisasContext *s, TCGv dest);
|
static void gen_jr(DisasContext *s, TCGv dest);
|
||||||
|
@ -631,7 +635,7 @@ static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip,
|
||||||
{
|
{
|
||||||
target_ulong next_eip;
|
target_ulong next_eip;
|
||||||
|
|
||||||
if (PE(s) && (CPL(s) > IOPL(s) || s->vm86)) {
|
if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
|
||||||
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
||||||
switch (ot) {
|
switch (ot) {
|
||||||
case MO_8:
|
case MO_8:
|
||||||
|
@ -1309,7 +1313,7 @@ static bool check_cpl0(DisasContext *s)
|
||||||
/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
|
/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
|
||||||
static bool check_vm86_iopl(DisasContext *s)
|
static bool check_vm86_iopl(DisasContext *s)
|
||||||
{
|
{
|
||||||
if (!s->vm86 || IOPL(s) == 3) {
|
if (!VM86(s) || IOPL(s) == 3) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
gen_exception_gpf(s);
|
gen_exception_gpf(s);
|
||||||
|
@ -1319,7 +1323,7 @@ static bool check_vm86_iopl(DisasContext *s)
|
||||||
/* Check for iopl allowing access; if not, raise #GP and return false. */
|
/* Check for iopl allowing access; if not, raise #GP and return false. */
|
||||||
static bool check_iopl(DisasContext *s)
|
static bool check_iopl(DisasContext *s)
|
||||||
{
|
{
|
||||||
if (s->vm86 ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
|
if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
gen_exception_gpf(s);
|
gen_exception_gpf(s);
|
||||||
|
@ -2359,7 +2363,7 @@ static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
|
||||||
call this function with seg_reg == R_CS */
|
call this function with seg_reg == R_CS */
|
||||||
static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
|
static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
|
||||||
{
|
{
|
||||||
if (PE(s) && !s->vm86) {
|
if (PE(s) && !VM86(s)) {
|
||||||
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
||||||
gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32);
|
gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32);
|
||||||
/* abort translation because the addseg value may change or
|
/* abort translation because the addseg value may change or
|
||||||
|
@ -4615,7 +4619,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
case 0xc4: /* 3-byte VEX */
|
case 0xc4: /* 3-byte VEX */
|
||||||
/* VEX prefixes cannot be used except in 32-bit mode.
|
/* VEX prefixes cannot be used except in 32-bit mode.
|
||||||
Otherwise the instruction is LES or LDS. */
|
Otherwise the instruction is LES or LDS. */
|
||||||
if (s->code32 && !s->vm86) {
|
if (s->code32 && !VM86(s)) {
|
||||||
static const int pp_prefix[4] = {
|
static const int pp_prefix[4] = {
|
||||||
0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
|
0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
|
||||||
};
|
};
|
||||||
|
@ -5122,7 +5126,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
gen_add_A0_im(s, 1 << ot);
|
gen_add_A0_im(s, 1 << ot);
|
||||||
gen_op_ld_v(s, MO_16, s->T0, s->A0);
|
gen_op_ld_v(s, MO_16, s->T0, s->A0);
|
||||||
do_lcall:
|
do_lcall:
|
||||||
if (PE(s) && !s->vm86) {
|
if (PE(s) && !VM86(s)) {
|
||||||
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
||||||
gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
|
gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
|
||||||
tcg_const_i32(dflag - 1),
|
tcg_const_i32(dflag - 1),
|
||||||
|
@ -5152,7 +5156,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
gen_add_A0_im(s, 1 << ot);
|
gen_add_A0_im(s, 1 << ot);
|
||||||
gen_op_ld_v(s, MO_16, s->T0, s->A0);
|
gen_op_ld_v(s, MO_16, s->T0, s->A0);
|
||||||
do_ljmp:
|
do_ljmp:
|
||||||
if (PE(s) && !s->vm86) {
|
if (PE(s) && !VM86(s)) {
|
||||||
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
||||||
gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
|
gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
|
||||||
tcg_const_tl(s->pc - s->cs_base));
|
tcg_const_tl(s->pc - s->cs_base));
|
||||||
|
@ -6585,7 +6589,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
case 0xca: /* lret im */
|
case 0xca: /* lret im */
|
||||||
val = x86_ldsw_code(env, s);
|
val = x86_ldsw_code(env, s);
|
||||||
do_lret:
|
do_lret:
|
||||||
if (PE(s) && !s->vm86) {
|
if (PE(s) && !VM86(s)) {
|
||||||
gen_update_cc_op(s);
|
gen_update_cc_op(s);
|
||||||
gen_jmp_im(s, pc_start - s->cs_base);
|
gen_jmp_im(s, pc_start - s->cs_base);
|
||||||
gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
|
gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
|
||||||
|
@ -6611,7 +6615,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
goto do_lret;
|
goto do_lret;
|
||||||
case 0xcf: /* iret */
|
case 0xcf: /* iret */
|
||||||
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
|
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
|
||||||
if (!PE(s) || s->vm86) {
|
if (!PE(s) || VM86(s)) {
|
||||||
/* real mode or vm86 mode */
|
/* real mode or vm86 mode */
|
||||||
if (!check_vm86_iopl(s)) {
|
if (!check_vm86_iopl(s)) {
|
||||||
break;
|
break;
|
||||||
|
@ -7315,7 +7319,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
op = (modrm >> 3) & 7;
|
op = (modrm >> 3) & 7;
|
||||||
switch(op) {
|
switch(op) {
|
||||||
case 0: /* sldt */
|
case 0: /* sldt */
|
||||||
if (!PE(s) || s->vm86)
|
if (!PE(s) || VM86(s))
|
||||||
goto illegal_op;
|
goto illegal_op;
|
||||||
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
|
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
|
||||||
tcg_gen_ld32u_tl(s->T0, cpu_env,
|
tcg_gen_ld32u_tl(s->T0, cpu_env,
|
||||||
|
@ -7324,7 +7328,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
|
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
|
||||||
break;
|
break;
|
||||||
case 2: /* lldt */
|
case 2: /* lldt */
|
||||||
if (!PE(s) || s->vm86)
|
if (!PE(s) || VM86(s))
|
||||||
goto illegal_op;
|
goto illegal_op;
|
||||||
if (check_cpl0(s)) {
|
if (check_cpl0(s)) {
|
||||||
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
|
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
|
||||||
|
@ -7334,7 +7338,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 1: /* str */
|
case 1: /* str */
|
||||||
if (!PE(s) || s->vm86)
|
if (!PE(s) || VM86(s))
|
||||||
goto illegal_op;
|
goto illegal_op;
|
||||||
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
|
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
|
||||||
tcg_gen_ld32u_tl(s->T0, cpu_env,
|
tcg_gen_ld32u_tl(s->T0, cpu_env,
|
||||||
|
@ -7343,7 +7347,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
|
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
|
||||||
break;
|
break;
|
||||||
case 3: /* ltr */
|
case 3: /* ltr */
|
||||||
if (!PE(s) || s->vm86)
|
if (!PE(s) || VM86(s))
|
||||||
goto illegal_op;
|
goto illegal_op;
|
||||||
if (check_cpl0(s)) {
|
if (check_cpl0(s)) {
|
||||||
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
|
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
|
||||||
|
@ -7354,7 +7358,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
break;
|
break;
|
||||||
case 4: /* verr */
|
case 4: /* verr */
|
||||||
case 5: /* verw */
|
case 5: /* verw */
|
||||||
if (!PE(s) || s->vm86)
|
if (!PE(s) || VM86(s))
|
||||||
goto illegal_op;
|
goto illegal_op;
|
||||||
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
|
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
|
||||||
gen_update_cc_op(s);
|
gen_update_cc_op(s);
|
||||||
|
@ -7725,7 +7729,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
TCGLabel *label1;
|
TCGLabel *label1;
|
||||||
TCGv t0, t1, t2, a0;
|
TCGv t0, t1, t2, a0;
|
||||||
|
|
||||||
if (!PE(s) || s->vm86)
|
if (!PE(s) || VM86(s))
|
||||||
goto illegal_op;
|
goto illegal_op;
|
||||||
t0 = tcg_temp_local_new();
|
t0 = tcg_temp_local_new();
|
||||||
t1 = tcg_temp_local_new();
|
t1 = tcg_temp_local_new();
|
||||||
|
@ -7773,7 +7777,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
||||||
{
|
{
|
||||||
TCGLabel *label1;
|
TCGLabel *label1;
|
||||||
TCGv t0;
|
TCGv t0;
|
||||||
if (!PE(s) || s->vm86)
|
if (!PE(s) || VM86(s))
|
||||||
goto illegal_op;
|
goto illegal_op;
|
||||||
ot = dflag != MO_16 ? MO_32 : MO_16;
|
ot = dflag != MO_16 ? MO_32 : MO_16;
|
||||||
modrm = x86_ldub_code(env, s);
|
modrm = x86_ldub_code(env, s);
|
||||||
|
@ -8489,12 +8493,12 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
|
||||||
g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
|
g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
|
||||||
g_assert(CPL(dc) == cpl);
|
g_assert(CPL(dc) == cpl);
|
||||||
g_assert(IOPL(dc) == iopl);
|
g_assert(IOPL(dc) == iopl);
|
||||||
|
g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
|
||||||
|
|
||||||
dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
|
dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
|
||||||
dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
|
dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
|
||||||
dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
|
dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
|
||||||
dc->f_st = 0;
|
dc->f_st = 0;
|
||||||
dc->vm86 = (flags >> VM_SHIFT) & 1;
|
|
||||||
dc->tf = (flags >> TF_SHIFT) & 1;
|
dc->tf = (flags >> TF_SHIFT) & 1;
|
||||||
dc->cc_op = CC_OP_DYNAMIC;
|
dc->cc_op = CC_OP_DYNAMIC;
|
||||||
dc->cc_op_dirty = false;
|
dc->cc_op_dirty = false;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue