Mirror of https://github.com/Motorhead1991/qemu.git
Synced 2025-08-07 17:53:56 -06:00
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* qemu-thread portability improvement (Fam)
* virtio-scsi IOMMU fix (Jason)
* poisoning and common-obj-y cleanups (Thomas)
* initial Hypervisor.framework refactoring (Sergio)
* x86 TCG interrupt injection fixes (Wu Xiang, me)
* --disable-tcg support for x86 (Yang Zhong, me)
* various other bugfixes and cleanups (Daniel, Peter, Thomas)

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJZXJF4AAoJEL/70l94x66DKLUH/jxig9RZgsGyt4PtbGzrv4+N
gvlPWPN5t3KQkdU1XRvJSa9qIXSW24k87+E61muIdeNI2GcSsyrNB1v7DyMJVPoZ
btHp8Cz69WWL+Lh1k2aw6DzxWfY9dgLba1ujyGWiqA/xtkF0y4eVl3gKd8eO+Tvs
WPzj1WkaLT/YL1RD4wkyUvChsVDVdxk03wGDD9oB+pC6ygaoYSDzPo241XtjnNpa
KAF8/0yFkxNhNS+6AI+Xq+GNaySpNln6P6xZaNgNeLOXzOcvQveM1/Xi4pMUqhDj
H9p+oMjuTPb4iGHyICfVSd6clL8Op2gwKCP2tLs2usWcmTO0oG40vc778WsQEZE=
=KdYh
-----END PGP SIGNATURE-----

# gpg: Signature made Wed 05 Jul 2017 08:12:56 BST
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (42 commits)
  target/i386: add the CONFIG_TCG into Makefiles
  target/i386: add the tcg_enabled() in target/i386/
  target/i386: move TLB refill function out of helper.c
  target/i386: split cpu_set_mxcsr() and make cpu_set_fpuc() inline
  target/i386: make cpu_get_fp80()/cpu_set_fp80() static
  target/i386: move cpu_sync_bndcs_hflags() function
  tcg: add the CONFIG_TCG into Makefiles
  tcg: add CONFIG_TCG guards in headers
  exec: elide calls to tb_lock and tb_unlock
  tcg: move tb_lock out of translate-all.h
  tcg: add the tcg-stub.c file into accel/stubs/
  vapic: use tcg_enabled
  monitor: disable "info jit" and "info opcount" if !TCG
  tcg: make tcg_allowed global
  cpu: move interrupt handling out of translate-common.c
  tcg: move page_size_init() function
  vl: add tcg_enabled() for tcg related code
  vl: convert -tb-size to qemu_strtoul
  configure: add --disable-tcg configure option
  configure: early test for supported targets
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit 67b9c5d4f3
60 changed files with 1009 additions and 779 deletions
target/i386/Makefile.objs
@@ -1,7 +1,8 @@
-obj-y += translate.o helper.o cpu.o bpt_helper.o
-obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
-obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o mpx_helper.o
-obj-y += gdbstub.o
+obj-y += helper.o cpu.o gdbstub.o xsave_helper.o
+obj-$(CONFIG_TCG) += translate.o
+obj-$(CONFIG_TCG) += bpt_helper.o cc_helper.o excp_helper.o fpu_helper.o
+obj-$(CONFIG_TCG) += int_helper.o mem_helper.o misc_helper.o mpx_helper.o
+obj-$(CONFIG_TCG) += seg_helper.o smm_helper.o svm_helper.o
 obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o
 obj-$(CONFIG_KVM) += kvm.o hyperv.o
 obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
target/i386/cpu.c
@@ -4040,8 +4040,10 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
     cc->class_by_name = x86_cpu_class_by_name;
     cc->parse_features = x86_cpu_parse_featurestr;
     cc->has_work = x86_cpu_has_work;
+#ifdef CONFIG_TCG
     cc->do_interrupt = x86_cpu_do_interrupt;
     cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
+#endif
     cc->dump_state = x86_cpu_dump_state;
     cc->get_crash_info = x86_cpu_get_crash_info;
     cc->set_pc = x86_cpu_set_pc;
@@ -4070,7 +4072,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
     cc->gdb_core_xml_file = "i386-32bit.xml";
     cc->gdb_num_core_regs = 41;
 #endif
-#ifndef CONFIG_USER_ONLY
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
     cc->debug_excp_handler = breakpoint_handler;
 #endif
     cc->cpu_exec_enter = x86_cpu_exec_enter;
target/i386/cpu.h
@@ -52,7 +52,9 @@
 #include "exec/cpu-defs.h"
 
+#ifdef CONFIG_TCG
 #include "fpu/softfloat.h"
+#endif
 
 #define R_EAX 0
 #define R_ECX 1
@@ -1418,8 +1420,6 @@ int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
 
 /* op_helper.c */
 /* used for debug or cpu save/restore */
-void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
-floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);
 
 /* cpu-exec.c */
 /* the following helpers are only usable in user mode simulation as
@@ -1596,11 +1596,14 @@ void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
 /* cc_helper.c */
 extern const uint8_t parity_table[256];
 uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
-void update_fp_status(CPUX86State *env);
 
 static inline uint32_t cpu_compute_eflags(CPUX86State *env)
 {
-    return env->eflags | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
+    uint32_t eflags = env->eflags;
+    if (tcg_enabled()) {
+        eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
+    }
+    return eflags;
 }
 
 /* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
@@ -1645,8 +1648,24 @@ static inline int32_t x86_get_a20_mask(CPUX86State *env)
 }
 
 /* fpu_helper.c */
-void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
-void cpu_set_fpuc(CPUX86State *env, uint16_t val);
+void update_fp_status(CPUX86State *env);
+void update_mxcsr_status(CPUX86State *env);
+
+static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
+{
+    env->mxcsr = mxcsr;
+    if (tcg_enabled()) {
+        update_mxcsr_status(env);
+    }
+}
+
+static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
+{
+    env->fpuc = fpuc;
+    if (tcg_enabled()) {
+        update_fp_status(env);
+    }
+}
 
 /* mem_helper.c */
 void helper_lock_init(void);
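The new inlines keep the raw register write unconditional and gate only the softfloat bookkeeping behind tcg_enabled(). A minimal usage sketch under stated assumptions — the loader function below is hypothetical; cpu_set_mxcsr()/cpu_set_fpuc() are the inlines from the hunk above:

/* Hypothetical caller restoring FPU control state, e.g. from a snapshot.
 * Under KVM/HAX the inlines reduce to plain stores; only under TCG do
 * they refresh the derived softfloat rounding/exception state. */
static void load_fpu_control(CPUX86State *env, uint32_t mxcsr, uint16_t fpuc)
{
    cpu_set_mxcsr(env, mxcsr);  /* update_mxcsr_status() runs only if TCG */
    cpu_set_fpuc(env, fpuc);    /* update_fp_status() runs only if TCG */
}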
@@ -1697,4 +1716,6 @@ void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
 /* cpu.c */
 bool cpu_is_bsp(X86CPU *cpu);
 
+void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
+void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
 #endif /* I386_CPU_H */
target/i386/excp_helper.c
@@ -136,3 +136,346 @@ void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
 {
     raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
 }
+
+#if defined(CONFIG_USER_ONLY)
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
+                             int is_write, int mmu_idx)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    /* user mode only emulation */
+    is_write &= 1;
+    env->cr[2] = addr;
+    env->error_code = (is_write << PG_ERROR_W_BIT);
+    env->error_code |= PG_ERROR_U_MASK;
+    cs->exception_index = EXCP0E_PAGE;
+    env->exception_is_int = 0;
+    env->exception_next_eip = -1;
+    return 1;
+}
+
+#else
+
+/* return value:
+ * -1 = cannot handle fault
+ *  0 = nothing more to do
+ *  1 = generate PF fault
+ */
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
+                             int is_write1, int mmu_idx)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    uint64_t ptep, pte;
+    int32_t a20_mask;
+    target_ulong pde_addr, pte_addr;
+    int error_code = 0;
+    int is_dirty, prot, page_size, is_write, is_user;
+    hwaddr paddr;
+    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
+    uint32_t page_offset;
+    target_ulong vaddr;
+
+    is_user = mmu_idx == MMU_USER_IDX;
+#if defined(DEBUG_MMU)
+    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
+           addr, is_write1, is_user, env->eip);
+#endif
+    is_write = is_write1 & 1;
+
+    a20_mask = x86_get_a20_mask(env);
+    if (!(env->cr[0] & CR0_PG_MASK)) {
+        pte = addr;
+#ifdef TARGET_X86_64
+        if (!(env->hflags & HF_LMA_MASK)) {
+            /* Without long mode we can only address 32bits in real mode */
+            pte = (uint32_t)pte;
+        }
+#endif
+        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        page_size = 4096;
+        goto do_mapping;
+    }
+
+    if (!(env->efer & MSR_EFER_NXE)) {
+        rsvd_mask |= PG_NX_MASK;
+    }
+
+    if (env->cr[4] & CR4_PAE_MASK) {
+        uint64_t pde, pdpe;
+        target_ulong pdpe_addr;
+
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            bool la57 = env->cr[4] & CR4_LA57_MASK;
+            uint64_t pml5e_addr, pml5e;
+            uint64_t pml4e_addr, pml4e;
+            int32_t sext;
+
+            /* test virtual address sign extension */
+            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
+            if (sext != 0 && sext != -1) {
+                env->error_code = 0;
+                cs->exception_index = EXCP0D_GPF;
+                return 1;
+            }
+
+            if (la57) {
+                pml5e_addr = ((env->cr[3] & ~0xfff) +
+                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
+                pml5e = x86_ldq_phys(cs, pml5e_addr);
+                if (!(pml5e & PG_PRESENT_MASK)) {
+                    goto do_fault;
+                }
+                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
+                    goto do_fault_rsvd;
+                }
+                if (!(pml5e & PG_ACCESSED_MASK)) {
+                    pml5e |= PG_ACCESSED_MASK;
+                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
+                }
+                ptep = pml5e ^ PG_NX_MASK;
+            } else {
+                pml5e = env->cr[3];
+                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
+            }
+
+            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
+                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
+            pml4e = x86_ldq_phys(cs, pml4e_addr);
+            if (!(pml4e & PG_PRESENT_MASK)) {
+                goto do_fault;
+            }
+            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
+                goto do_fault_rsvd;
+            }
+            if (!(pml4e & PG_ACCESSED_MASK)) {
+                pml4e |= PG_ACCESSED_MASK;
+                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
+            }
+            ptep &= pml4e ^ PG_NX_MASK;
+            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
+                a20_mask;
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
+            if (!(pdpe & PG_PRESENT_MASK)) {
+                goto do_fault;
+            }
+            if (pdpe & rsvd_mask) {
+                goto do_fault_rsvd;
+            }
+            ptep &= pdpe ^ PG_NX_MASK;
+            if (!(pdpe & PG_ACCESSED_MASK)) {
+                pdpe |= PG_ACCESSED_MASK;
+                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
+            }
+            if (pdpe & PG_PSE_MASK) {
+                /* 1 GB page */
+                page_size = 1024 * 1024 * 1024;
+                pte_addr = pdpe_addr;
+                pte = pdpe;
+                goto do_check_protect;
+            }
+        } else
+#endif
+        {
+            /* XXX: load them when cr3 is loaded ? */
+            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
+                a20_mask;
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
+            if (!(pdpe & PG_PRESENT_MASK)) {
+                goto do_fault;
+            }
+            rsvd_mask |= PG_HI_USER_MASK;
+            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
+                goto do_fault_rsvd;
+            }
+            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
+        }
+
+        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
+            a20_mask;
+        pde = x86_ldq_phys(cs, pde_addr);
+        if (!(pde & PG_PRESENT_MASK)) {
+            goto do_fault;
+        }
+        if (pde & rsvd_mask) {
+            goto do_fault_rsvd;
+        }
+        ptep &= pde ^ PG_NX_MASK;
+        if (pde & PG_PSE_MASK) {
+            /* 2 MB page */
+            page_size = 2048 * 1024;
+            pte_addr = pde_addr;
+            pte = pde;
+            goto do_check_protect;
+        }
+        /* 4 KB page */
+        if (!(pde & PG_ACCESSED_MASK)) {
+            pde |= PG_ACCESSED_MASK;
+            x86_stl_phys_notdirty(cs, pde_addr, pde);
+        }
+        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
+            a20_mask;
+        pte = x86_ldq_phys(cs, pte_addr);
+        if (!(pte & PG_PRESENT_MASK)) {
+            goto do_fault;
+        }
+        if (pte & rsvd_mask) {
+            goto do_fault_rsvd;
+        }
+        /* combine pde and pte nx, user and rw protections */
+        ptep &= pte ^ PG_NX_MASK;
+        page_size = 4096;
+    } else {
+        uint32_t pde;
+
+        /* page directory entry */
+        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
+            a20_mask;
+        pde = x86_ldl_phys(cs, pde_addr);
+        if (!(pde & PG_PRESENT_MASK)) {
+            goto do_fault;
+        }
+        ptep = pde | PG_NX_MASK;
+
+        /* if PSE bit is set, then we use a 4MB page */
+        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+            page_size = 4096 * 1024;
+            pte_addr = pde_addr;
+
+            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
+             * Leave bits 20-13 in place for setting accessed/dirty bits below.
+             */
+            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
+            rsvd_mask = 0x200000;
+            goto do_check_protect_pse36;
+        }
+
+        if (!(pde & PG_ACCESSED_MASK)) {
+            pde |= PG_ACCESSED_MASK;
+            x86_stl_phys_notdirty(cs, pde_addr, pde);
+        }
+
+        /* page directory entry */
+        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
+            a20_mask;
+        pte = x86_ldl_phys(cs, pte_addr);
+        if (!(pte & PG_PRESENT_MASK)) {
+            goto do_fault;
+        }
+        /* combine pde and pte user and rw protections */
+        ptep &= pte | PG_NX_MASK;
+        page_size = 4096;
+        rsvd_mask = 0;
+    }
+
+do_check_protect:
+    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
+do_check_protect_pse36:
+    if (pte & rsvd_mask) {
+        goto do_fault_rsvd;
+    }
+    ptep ^= PG_NX_MASK;
+
+    /* can the page can be put in the TLB?  prot will tell us */
+    if (is_user && !(ptep & PG_USER_MASK)) {
+        goto do_fault_protect;
+    }
+
+    prot = 0;
+    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
+        prot |= PAGE_READ;
+        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
+            prot |= PAGE_WRITE;
+        }
+    }
+    if (!(ptep & PG_NX_MASK) &&
+        (mmu_idx == MMU_USER_IDX ||
+         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
+        prot |= PAGE_EXEC;
+    }
+    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
+        (ptep & PG_USER_MASK) && env->pkru) {
+        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
+        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
+        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
+        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+        if (pkru_ad) {
+            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
+        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
+            pkru_prot &= ~PAGE_WRITE;
+        }
+
+        prot &= pkru_prot;
+        if ((pkru_prot & (1 << is_write1)) == 0) {
+            assert(is_write1 != 2);
+            error_code |= PG_ERROR_PK_MASK;
+            goto do_fault_protect;
+        }
+    }
+
+    if ((prot & (1 << is_write1)) == 0) {
+        goto do_fault_protect;
+    }
+
+    /* yes, it can! */
+    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
+    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
+        pte |= PG_ACCESSED_MASK;
+        if (is_dirty) {
+            pte |= PG_DIRTY_MASK;
+        }
+        x86_stl_phys_notdirty(cs, pte_addr, pte);
+    }
+
+    if (!(pte & PG_DIRTY_MASK)) {
+        /* only set write access if already dirty... otherwise wait
+           for dirty access */
+        assert(!is_write);
+        prot &= ~PAGE_WRITE;
+    }
+
+ do_mapping:
+    pte = pte & a20_mask;
+
+    /* align to page_size */
+    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
+
+    /* Even if 4MB pages, we map only one 4KB page in the cache to
+       avoid filling it too fast */
+    vaddr = addr & TARGET_PAGE_MASK;
+    page_offset = vaddr & (page_size - 1);
+    paddr = pte + page_offset;
+
+    assert(prot & (1 << is_write1));
+    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
+                            prot, mmu_idx, page_size);
+    return 0;
+ do_fault_rsvd:
+    error_code |= PG_ERROR_RSVD_MASK;
+ do_fault_protect:
+    error_code |= PG_ERROR_P_MASK;
+ do_fault:
+    error_code |= (is_write << PG_ERROR_W_BIT);
+    if (is_user)
+        error_code |= PG_ERROR_U_MASK;
+    if (is_write1 == 2 &&
+        (((env->efer & MSR_EFER_NXE) &&
+          (env->cr[4] & CR4_PAE_MASK)) ||
+         (env->cr[4] & CR4_SMEP_MASK)))
+        error_code |= PG_ERROR_I_D_MASK;
+    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
+        /* cr2 is not modified in case of exceptions */
+        x86_stq_phys(cs,
+                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+                     addr);
+    } else {
+        env->cr[2] = addr;
+    }
+    env->error_code = error_code;
+    cs->exception_index = EXCP0E_PAGE;
+    return 1;
+}
+#endif
target/i386/fpu_helper.c
@@ -1539,24 +1539,6 @@ void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask)
     raise_exception_ra(env, EXCP0D_GPF, GETPC());
 }
 
-void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
-{
-    CPU_LDoubleU temp;
-
-    temp.d = f;
-    *pmant = temp.l.lower;
-    *pexp = temp.l.upper;
-}
-
-floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
-{
-    CPU_LDoubleU temp;
-
-    temp.l.upper = upper;
-    temp.l.lower = mant;
-    return temp.d;
-}
-
 /* MMX/SSE */
 /* XXX: optimize by storing fptt and fptags in the static cpu state */
 
@@ -1568,12 +1550,11 @@ floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
 #define SSE_RC_CHOP 0x6000
 #define SSE_FZ 0x8000
 
-void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
+void update_mxcsr_status(CPUX86State *env)
 {
+    uint32_t mxcsr = env->mxcsr;
     int rnd_type;
 
-    env->mxcsr = mxcsr;
-
     /* set rounding mode */
     switch (mxcsr & SSE_RC_MASK) {
     default:
@@ -1599,12 +1580,6 @@ void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
     set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status);
 }
 
-void cpu_set_fpuc(CPUX86State *env, uint16_t val)
-{
-    env->fpuc = val;
-    update_fp_status(env);
-}
-
 void helper_ldmxcsr(CPUX86State *env, uint32_t val)
 {
     cpu_set_mxcsr(env, val);
target/i386/hax-all.c
@@ -232,7 +232,7 @@ int hax_init_vcpu(CPUState *cpu)
     }
 
     cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
-    cpu->hax_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
     qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));
 
     return ret;
@@ -599,12 +599,12 @@ static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
     CPUArchState *env = cpu->env_ptr;
 
     hax_arch_get_registers(env);
-    cpu->hax_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 }
 
 void hax_cpu_synchronize_state(CPUState *cpu)
 {
-    if (!cpu->hax_vcpu_dirty) {
+    if (!cpu->vcpu_dirty) {
         run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
@@ -615,7 +615,7 @@ static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
     CPUArchState *env = cpu->env_ptr;
 
     hax_vcpu_sync_state(env, 1);
-    cpu->hax_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void hax_cpu_synchronize_post_reset(CPUState *cpu)
@@ -628,7 +628,7 @@ static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
     CPUArchState *env = cpu->env_ptr;
 
     hax_vcpu_sync_state(env, 1);
-    cpu->hax_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void hax_cpu_synchronize_post_init(CPUState *cpu)
@@ -638,7 +638,7 @@ void hax_cpu_synchronize_post_init(CPUState *cpu)
 
 static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
 {
-    cpu->hax_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 }
 
 void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
target/i386/helper.c
@@ -29,6 +29,36 @@
 #include "hw/i386/apic_internal.h"
 #endif
 
+void cpu_sync_bndcs_hflags(CPUX86State *env)
+{
+    uint32_t hflags = env->hflags;
+    uint32_t hflags2 = env->hflags2;
+    uint32_t bndcsr;
+
+    if ((hflags & HF_CPL_MASK) == 3) {
+        bndcsr = env->bndcs_regs.cfgu;
+    } else {
+        bndcsr = env->msr_bndcfgs;
+    }
+
+    if ((env->cr[4] & CR4_OSXSAVE_MASK)
+        && (env->xcr0 & XSTATE_BNDCSR_MASK)
+        && (bndcsr & BNDCFG_ENABLE)) {
+        hflags |= HF_MPX_EN_MASK;
+    } else {
+        hflags &= ~HF_MPX_EN_MASK;
+    }
+
+    if (bndcsr & BNDCFG_BNDPRESERVE) {
+        hflags2 |= HF2_MPX_PR_MASK;
+    } else {
+        hflags2 &= ~HF2_MPX_PR_MASK;
+    }
+
+    env->hflags = hflags;
+    env->hflags2 = hflags2;
+}
+
 static void cpu_x86_version(CPUX86State *env, int *family, int *model)
 {
     int cpuver = env->cpuid_version;
@@ -692,349 +722,7 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
     cpu_sync_bndcs_hflags(env);
 }
 
-#if defined(CONFIG_USER_ONLY)
-int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
-                             int is_write, int mmu_idx)
-{
-    X86CPU *cpu = X86_CPU(cs);
-    CPUX86State *env = &cpu->env;
-
-    /* user mode only emulation */
-    is_write &= 1;
-    env->cr[2] = addr;
-    env->error_code = (is_write << PG_ERROR_W_BIT);
-    env->error_code |= PG_ERROR_U_MASK;
-    cs->exception_index = EXCP0E_PAGE;
-    env->exception_is_int = 0;
-    env->exception_next_eip = -1;
-    return 1;
-}
-
-#else
-
-/* return value:
- * -1 = cannot handle fault
- *  0 = nothing more to do
- *  1 = generate PF fault
- */
-int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
-                             int is_write1, int mmu_idx)
-{
-    X86CPU *cpu = X86_CPU(cs);
-    CPUX86State *env = &cpu->env;
-    uint64_t ptep, pte;
-    int32_t a20_mask;
-    target_ulong pde_addr, pte_addr;
-    int error_code = 0;
-    int is_dirty, prot, page_size, is_write, is_user;
-    hwaddr paddr;
-    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
-    uint32_t page_offset;
-    target_ulong vaddr;
-
-    is_user = mmu_idx == MMU_USER_IDX;
-#if defined(DEBUG_MMU)
-    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
-           addr, is_write1, is_user, env->eip);
-#endif
-    is_write = is_write1 & 1;
-
-    a20_mask = x86_get_a20_mask(env);
-    if (!(env->cr[0] & CR0_PG_MASK)) {
-        pte = addr;
-#ifdef TARGET_X86_64
-        if (!(env->hflags & HF_LMA_MASK)) {
-            /* Without long mode we can only address 32bits in real mode */
-            pte = (uint32_t)pte;
-        }
-#endif
-        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-        page_size = 4096;
-        goto do_mapping;
-    }
-
-    if (!(env->efer & MSR_EFER_NXE)) {
-        rsvd_mask |= PG_NX_MASK;
-    }
-
-    if (env->cr[4] & CR4_PAE_MASK) {
-        uint64_t pde, pdpe;
-        target_ulong pdpe_addr;
-
-#ifdef TARGET_X86_64
-        if (env->hflags & HF_LMA_MASK) {
-            bool la57 = env->cr[4] & CR4_LA57_MASK;
-            uint64_t pml5e_addr, pml5e;
-            uint64_t pml4e_addr, pml4e;
-            int32_t sext;
-
-            /* test virtual address sign extension */
-            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
-            if (sext != 0 && sext != -1) {
-                env->error_code = 0;
-                cs->exception_index = EXCP0D_GPF;
-                return 1;
-            }
-
-            if (la57) {
-                pml5e_addr = ((env->cr[3] & ~0xfff) +
-                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
-                pml5e = x86_ldq_phys(cs, pml5e_addr);
-                if (!(pml5e & PG_PRESENT_MASK)) {
-                    goto do_fault;
-                }
-                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
-                    goto do_fault_rsvd;
-                }
-                if (!(pml5e & PG_ACCESSED_MASK)) {
-                    pml5e |= PG_ACCESSED_MASK;
-                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
-                }
-                ptep = pml5e ^ PG_NX_MASK;
-            } else {
-                pml5e = env->cr[3];
-                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
-            }
-
-            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
-                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
-            pml4e = x86_ldq_phys(cs, pml4e_addr);
-            if (!(pml4e & PG_PRESENT_MASK)) {
-                goto do_fault;
-            }
-            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
-                goto do_fault_rsvd;
-            }
-            if (!(pml4e & PG_ACCESSED_MASK)) {
-                pml4e |= PG_ACCESSED_MASK;
-                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
-            }
-            ptep &= pml4e ^ PG_NX_MASK;
-            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
-                a20_mask;
-            pdpe = x86_ldq_phys(cs, pdpe_addr);
-            if (!(pdpe & PG_PRESENT_MASK)) {
-                goto do_fault;
-            }
-            if (pdpe & rsvd_mask) {
-                goto do_fault_rsvd;
-            }
-            ptep &= pdpe ^ PG_NX_MASK;
-            if (!(pdpe & PG_ACCESSED_MASK)) {
-                pdpe |= PG_ACCESSED_MASK;
-                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
-            }
-            if (pdpe & PG_PSE_MASK) {
-                /* 1 GB page */
-                page_size = 1024 * 1024 * 1024;
-                pte_addr = pdpe_addr;
-                pte = pdpe;
-                goto do_check_protect;
-            }
-        } else
-#endif
-        {
-            /* XXX: load them when cr3 is loaded ? */
-            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
-                a20_mask;
-            pdpe = x86_ldq_phys(cs, pdpe_addr);
-            if (!(pdpe & PG_PRESENT_MASK)) {
-                goto do_fault;
-            }
-            rsvd_mask |= PG_HI_USER_MASK;
-            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
-                goto do_fault_rsvd;
-            }
-            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
-        }
-
-        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
-            a20_mask;
-        pde = x86_ldq_phys(cs, pde_addr);
-        if (!(pde & PG_PRESENT_MASK)) {
-            goto do_fault;
-        }
-        if (pde & rsvd_mask) {
-            goto do_fault_rsvd;
-        }
-        ptep &= pde ^ PG_NX_MASK;
-        if (pde & PG_PSE_MASK) {
-            /* 2 MB page */
-            page_size = 2048 * 1024;
-            pte_addr = pde_addr;
-            pte = pde;
-            goto do_check_protect;
-        }
-        /* 4 KB page */
-        if (!(pde & PG_ACCESSED_MASK)) {
-            pde |= PG_ACCESSED_MASK;
-            x86_stl_phys_notdirty(cs, pde_addr, pde);
-        }
-        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
-            a20_mask;
-        pte = x86_ldq_phys(cs, pte_addr);
-        if (!(pte & PG_PRESENT_MASK)) {
-            goto do_fault;
-        }
-        if (pte & rsvd_mask) {
-            goto do_fault_rsvd;
-        }
-        /* combine pde and pte nx, user and rw protections */
-        ptep &= pte ^ PG_NX_MASK;
-        page_size = 4096;
-    } else {
-        uint32_t pde;
-
-        /* page directory entry */
-        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
-            a20_mask;
-        pde = x86_ldl_phys(cs, pde_addr);
-        if (!(pde & PG_PRESENT_MASK)) {
-            goto do_fault;
-        }
-        ptep = pde | PG_NX_MASK;
-
-        /* if PSE bit is set, then we use a 4MB page */
-        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
-            page_size = 4096 * 1024;
-            pte_addr = pde_addr;
-
-            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
-             * Leave bits 20-13 in place for setting accessed/dirty bits below.
-             */
-            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
-            rsvd_mask = 0x200000;
-            goto do_check_protect_pse36;
-        }
-
-        if (!(pde & PG_ACCESSED_MASK)) {
-            pde |= PG_ACCESSED_MASK;
-            x86_stl_phys_notdirty(cs, pde_addr, pde);
-        }
-
-        /* page directory entry */
-        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
-            a20_mask;
-        pte = x86_ldl_phys(cs, pte_addr);
-        if (!(pte & PG_PRESENT_MASK)) {
-            goto do_fault;
-        }
-        /* combine pde and pte user and rw protections */
-        ptep &= pte | PG_NX_MASK;
-        page_size = 4096;
-        rsvd_mask = 0;
-    }
-
-do_check_protect:
-    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
-do_check_protect_pse36:
-    if (pte & rsvd_mask) {
-        goto do_fault_rsvd;
-    }
-    ptep ^= PG_NX_MASK;
-
-    /* can the page can be put in the TLB?  prot will tell us */
-    if (is_user && !(ptep & PG_USER_MASK)) {
-        goto do_fault_protect;
-    }
-
-    prot = 0;
-    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
-        prot |= PAGE_READ;
-        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
-            prot |= PAGE_WRITE;
-        }
-    }
-    if (!(ptep & PG_NX_MASK) &&
-        (mmu_idx == MMU_USER_IDX ||
-         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
-        prot |= PAGE_EXEC;
-    }
-    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
-        (ptep & PG_USER_MASK) && env->pkru) {
-        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
-        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
-        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
-        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-
-        if (pkru_ad) {
-            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
-        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
-            pkru_prot &= ~PAGE_WRITE;
-        }
-
-        prot &= pkru_prot;
-        if ((pkru_prot & (1 << is_write1)) == 0) {
-            assert(is_write1 != 2);
-            error_code |= PG_ERROR_PK_MASK;
-            goto do_fault_protect;
-        }
-    }
-
-    if ((prot & (1 << is_write1)) == 0) {
-        goto do_fault_protect;
-    }
-
-    /* yes, it can! */
-    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
-    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
-        pte |= PG_ACCESSED_MASK;
-        if (is_dirty) {
-            pte |= PG_DIRTY_MASK;
-        }
-        x86_stl_phys_notdirty(cs, pte_addr, pte);
-    }
-
-    if (!(pte & PG_DIRTY_MASK)) {
-        /* only set write access if already dirty... otherwise wait
-           for dirty access */
-        assert(!is_write);
-        prot &= ~PAGE_WRITE;
-    }
-
- do_mapping:
-    pte = pte & a20_mask;
-
-    /* align to page_size */
-    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
-
-    /* Even if 4MB pages, we map only one 4KB page in the cache to
-       avoid filling it too fast */
-    vaddr = addr & TARGET_PAGE_MASK;
-    page_offset = vaddr & (page_size - 1);
-    paddr = pte + page_offset;
-
-    assert(prot & (1 << is_write1));
-    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
-                            prot, mmu_idx, page_size);
-    return 0;
- do_fault_rsvd:
-    error_code |= PG_ERROR_RSVD_MASK;
- do_fault_protect:
-    error_code |= PG_ERROR_P_MASK;
- do_fault:
-    error_code |= (is_write << PG_ERROR_W_BIT);
-    if (is_user)
-        error_code |= PG_ERROR_U_MASK;
-    if (is_write1 == 2 &&
-        (((env->efer & MSR_EFER_NXE) &&
-          (env->cr[4] & CR4_PAE_MASK)) ||
-         (env->cr[4] & CR4_SMEP_MASK)))
-        error_code |= PG_ERROR_I_D_MASK;
-    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
-        /* cr2 is not modified in case of exceptions */
-        x86_stq_phys(cs,
-                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
-                     addr);
-    } else {
-        env->cr[2] = addr;
-    }
-    env->error_code = error_code;
-    cs->exception_index = EXCP0E_PAGE;
-    return 1;
-}
-
+#if !defined(CONFIG_USER_ONLY)
 hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -1302,7 +990,7 @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
         env->tpr_access_type = access;
 
         cpu_interrupt(cs, CPU_INTERRUPT_TPR);
-    } else {
+    } else if (tcg_enabled()) {
         cpu_restore_state(cs, cs->mem_io_pc);
 
         apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
target/i386/kvm.c
@@ -1433,56 +1433,12 @@ static int kvm_put_xsave(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
     X86XSaveArea *xsave = env->kvm_xsave_buf;
-    uint16_t cwd, swd, twd;
-    int i;
 
     if (!has_xsave) {
         return kvm_put_fpu(cpu);
     }
+    x86_cpu_xsave_all_areas(cpu, xsave);
 
-    memset(xsave, 0, sizeof(struct kvm_xsave));
-    twd = 0;
-    swd = env->fpus & ~(7 << 11);
-    swd |= (env->fpstt & 7) << 11;
-    cwd = env->fpuc;
-    for (i = 0; i < 8; ++i) {
-        twd |= (!env->fptags[i]) << i;
-    }
-    xsave->legacy.fcw = cwd;
-    xsave->legacy.fsw = swd;
-    xsave->legacy.ftw = twd;
-    xsave->legacy.fpop = env->fpop;
-    xsave->legacy.fpip = env->fpip;
-    xsave->legacy.fpdp = env->fpdp;
-    memcpy(&xsave->legacy.fpregs, env->fpregs,
-            sizeof env->fpregs);
-    xsave->legacy.mxcsr = env->mxcsr;
-    xsave->header.xstate_bv = env->xstate_bv;
-    memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
-            sizeof env->bnd_regs);
-    xsave->bndcsr_state.bndcsr = env->bndcs_regs;
-    memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
-            sizeof env->opmask_regs);
-
-    for (i = 0; i < CPU_NB_REGS; i++) {
-        uint8_t *xmm = xsave->legacy.xmm_regs[i];
-        uint8_t *ymmh = xsave->avx_state.ymmh[i];
-        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
-        stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
-        stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
-        stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
-        stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
-        stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
-        stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
-        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
-        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
-    }
-
-#ifdef TARGET_X86_64
-    memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
-            16 * sizeof env->xmm_regs[16]);
-    memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
-#endif
     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
 }
@@ -1868,8 +1824,7 @@ static int kvm_get_xsave(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
     X86XSaveArea *xsave = env->kvm_xsave_buf;
-    int ret, i;
-    uint16_t cwd, swd, twd;
+    int ret;
 
     if (!has_xsave) {
         return kvm_get_fpu(cpu);
@@ -1879,48 +1834,8 @@ static int kvm_get_xsave(X86CPU *cpu)
     if (ret < 0) {
         return ret;
     }
+    x86_cpu_xrstor_all_areas(cpu, xsave);
 
-    cwd = xsave->legacy.fcw;
-    swd = xsave->legacy.fsw;
-    twd = xsave->legacy.ftw;
-    env->fpop = xsave->legacy.fpop;
-    env->fpstt = (swd >> 11) & 7;
-    env->fpus = swd;
-    env->fpuc = cwd;
-    for (i = 0; i < 8; ++i) {
-        env->fptags[i] = !((twd >> i) & 1);
-    }
-    env->fpip = xsave->legacy.fpip;
-    env->fpdp = xsave->legacy.fpdp;
-    env->mxcsr = xsave->legacy.mxcsr;
-    memcpy(env->fpregs, &xsave->legacy.fpregs,
-            sizeof env->fpregs);
-    env->xstate_bv = xsave->header.xstate_bv;
-    memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
-            sizeof env->bnd_regs);
-    env->bndcs_regs = xsave->bndcsr_state.bndcsr;
-    memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
-            sizeof env->opmask_regs);
-
-    for (i = 0; i < CPU_NB_REGS; i++) {
-        uint8_t *xmm = xsave->legacy.xmm_regs[i];
-        uint8_t *ymmh = xsave->avx_state.ymmh[i];
-        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
-        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
-        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
-        env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
-        env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
-        env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
-        env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
-        env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
-        env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
-    }
-
-#ifdef TARGET_X86_64
-    memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
-            16 * sizeof env->xmm_regs[16]);
-    memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
-#endif
     return 0;
 }
target/i386/kvm_i386.h
@@ -15,6 +15,29 @@
 
 #define kvm_apic_in_kernel() (kvm_irqchip_in_kernel())
 
+#ifdef CONFIG_KVM
+
+#define kvm_pit_in_kernel() \
+    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
+#define kvm_pic_in_kernel() \
+    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
+#define kvm_ioapic_in_kernel() \
+    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
+
+#else
+
+#define kvm_pit_in_kernel() 0
+#define kvm_pic_in_kernel() 0
+#define kvm_ioapic_in_kernel() 0
+
+/* These constants must never be used at runtime if kvm_enabled() is false.
+ * They exist so we don't need #ifdefs around KVM-specific code that already
+ * checks kvm_enabled() properly.
+ */
+#define KVM_CPUID_FEATURES 0
+
+#endif  /* CONFIG_KVM */
+
 bool kvm_allows_irq0_override(void);
 bool kvm_has_smm(void);
 bool kvm_has_adjust_clock_stable(void);
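A sketch of how the zero placeholder above is meant to be consumed, under stated assumptions: the caller below is hypothetical, while kvm_enabled() is QEMU's real predicate, which folds to 0 in !CONFIG_KVM builds so the dead branch and its use of KVM_CPUID_FEATURES are compiled out.

static uint32_t hypervisor_cpuid_base(void)     /* hypothetical caller */
{
    if (kvm_enabled()) {
        /* Only reachable in CONFIG_KVM builds, where the real value is
         * defined; otherwise the branch is eliminated at compile time
         * and the placeholder 0 is never used at runtime. */
        return KVM_CPUID_FEATURES;
    }
    return 0;
}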
target/i386/machine.c
@@ -142,6 +142,24 @@ typedef struct x86_FPReg_tmp {
     uint16_t tmp_exp;
 } x86_FPReg_tmp;
 
+static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
+{
+    CPU_LDoubleU temp;
+
+    temp.d = f;
+    *pmant = temp.l.lower;
+    *pexp = temp.l.upper;
+}
+
+static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
+{
+    CPU_LDoubleU temp;
+
+    temp.l.upper = upper;
+    temp.l.lower = mant;
+    return temp.d;
+}
+
 static void fpreg_pre_save(void *opaque)
 {
     x86_FPReg_tmp *tmp = opaque;
@@ -262,14 +280,17 @@ static int cpu_post_load(void *opaque, int version_id)
     for(i = 0; i < 8; i++) {
         env->fptags[i] = (env->fptag_vmstate >> i) & 1;
     }
-    update_fp_status(env);
+    if (tcg_enabled()) {
+        target_ulong dr7;
+        update_fp_status(env);
+        update_mxcsr_status(env);
+
+        cpu_breakpoint_remove_all(cs, BP_CPU);
+        cpu_watchpoint_remove_all(cs, BP_CPU);
 
-    cpu_breakpoint_remove_all(cs, BP_CPU);
-    cpu_watchpoint_remove_all(cs, BP_CPU);
-    {
         /* Indicate all breakpoints disabled, as they are, then
            let the helper re-enable them.  */
-        target_ulong dr7 = env->dr[7];
+        dr7 = env->dr[7];
         env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
         cpu_x86_update_dr7(env, dr7);
     }
target/i386/mpx_helper.c
@@ -24,36 +24,6 @@
 #include "exec/exec-all.h"
 
 
-void cpu_sync_bndcs_hflags(CPUX86State *env)
-{
-    uint32_t hflags = env->hflags;
-    uint32_t hflags2 = env->hflags2;
-    uint32_t bndcsr;
-
-    if ((hflags & HF_CPL_MASK) == 3) {
-        bndcsr = env->bndcs_regs.cfgu;
-    } else {
-        bndcsr = env->msr_bndcfgs;
-    }
-
-    if ((env->cr[4] & CR4_OSXSAVE_MASK)
-        && (env->xcr0 & XSTATE_BNDCSR_MASK)
-        && (bndcsr & BNDCFG_ENABLE)) {
-        hflags |= HF_MPX_EN_MASK;
-    } else {
-        hflags &= ~HF_MPX_EN_MASK;
-    }
-
-    if (bndcsr & BNDCFG_BNDPRESERVE) {
-        hflags2 |= HF2_MPX_PR_MASK;
-    } else {
-        hflags2 &= ~HF2_MPX_PR_MASK;
-    }
-
-    env->hflags = hflags;
-    env->hflags2 = hflags2;
-}
-
 void helper_bndck(CPUX86State *env, uint32_t fail)
 {
     if (unlikely(fail)) {
target/i386/seg_helper.c
@@ -692,7 +692,10 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
     if (!(e2 & DESC_P_MASK)) {
         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
     }
-    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+    if (e2 & DESC_C_MASK) {
+        dpl = cpl;
+    }
+    if (dpl < cpl) {
         /* to inner privilege */
         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
         if ((ss & 0xfffc) == 0) {
@@ -719,7 +722,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
         new_stack = 1;
         sp_mask = get_sp_mask(ss_e2);
         ssp = get_seg_base(ss_e1, ss_e2);
-    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+    } else {
         /* to same privilege */
         if (vm86) {
             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
@@ -728,13 +731,6 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
         sp_mask = get_sp_mask(env->segs[R_SS].flags);
         ssp = env->segs[R_SS].base;
         esp = env->regs[R_ESP];
-        dpl = cpl;
-    } else {
-        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
-        new_stack = 0; /* avoid warning */
-        sp_mask = 0; /* avoid warning */
-        ssp = 0; /* avoid warning */
-        esp = 0; /* avoid warning */
     }
 
     shift = type >> 3;
@@ -919,23 +915,21 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
     }
-    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
+    if (e2 & DESC_C_MASK) {
+        dpl = cpl;
+    }
+    if (dpl < cpl || ist != 0) {
         /* to inner privilege */
         new_stack = 1;
         esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
         ss = 0;
-    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+    } else {
         /* to same privilege */
         if (env->eflags & VM_MASK) {
             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
         }
         new_stack = 0;
         esp = env->regs[R_ESP];
-        dpl = cpl;
-    } else {
-        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
-        new_stack = 0; /* avoid warning */
-        esp = 0; /* avoid warning */
     }
     esp &= ~0xfLL; /* align stack */
 
@@ -956,7 +950,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
 
     if (new_stack) {
         ss = 0 | dpl;
-        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
+        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
     }
     env->regs[R_ESP] = esp;
 
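The seg_helper.c hunks above implement the rule that a conforming code segment targeted by an interrupt or trap gate is handled as if its DPL equaled the current CPL, which makes the old final else branches unreachable and lets them be deleted. A compressed, hypothetical standalone sketch of the resulting dispatch decision:

/* Hypothetical distillation of the new logic: after the conforming-segment
 * adjustment only two cases remain, inner privilege (stack switch) or same
 * privilege; ist is only meaningful for the 64-bit gate path. */
static bool needs_stack_switch(int dpl, int cpl, bool conforming, int ist)
{
    if (conforming) {
        dpl = cpl;                    /* conforming segments keep CPL */
    }
    return dpl < cpl || ist != 0;     /* inner privilege or IST slot */
}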
114 target/i386/xsave_helper.c Normal file
@@ -0,0 +1,114 @@
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+
+#include "qemu-common.h"
+#include "cpu.h"
+
+void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf)
+{
+    CPUX86State *env = &cpu->env;
+    X86XSaveArea *xsave = buf;
+
+    uint16_t cwd, swd, twd;
+    int i;
+    memset(xsave, 0, sizeof(X86XSaveArea));
+    twd = 0;
+    swd = env->fpus & ~(7 << 11);
+    swd |= (env->fpstt & 7) << 11;
+    cwd = env->fpuc;
+    for (i = 0; i < 8; ++i) {
+        twd |= (!env->fptags[i]) << i;
+    }
+    xsave->legacy.fcw = cwd;
+    xsave->legacy.fsw = swd;
+    xsave->legacy.ftw = twd;
+    xsave->legacy.fpop = env->fpop;
+    xsave->legacy.fpip = env->fpip;
+    xsave->legacy.fpdp = env->fpdp;
+    memcpy(&xsave->legacy.fpregs, env->fpregs,
+            sizeof env->fpregs);
+    xsave->legacy.mxcsr = env->mxcsr;
+    xsave->header.xstate_bv = env->xstate_bv;
+    memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
+            sizeof env->bnd_regs);
+    xsave->bndcsr_state.bndcsr = env->bndcs_regs;
+    memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
+            sizeof env->opmask_regs);
+
+    for (i = 0; i < CPU_NB_REGS; i++) {
+        uint8_t *xmm = xsave->legacy.xmm_regs[i];
+        uint8_t *ymmh = xsave->avx_state.ymmh[i];
+        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+        stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
+        stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
+        stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
+        stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
+        stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
+        stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
+        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
+        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
+    }
+
+#ifdef TARGET_X86_64
+    memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
+            16 * sizeof env->xmm_regs[16]);
+    memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
+#endif
+
+}
+
+void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf)
+{
+
+    CPUX86State *env = &cpu->env;
+    const X86XSaveArea *xsave = buf;
+
+    int i;
+    uint16_t cwd, swd, twd;
+    cwd = xsave->legacy.fcw;
+    swd = xsave->legacy.fsw;
+    twd = xsave->legacy.ftw;
+    env->fpop = xsave->legacy.fpop;
+    env->fpstt = (swd >> 11) & 7;
+    env->fpus = swd;
+    env->fpuc = cwd;
+    for (i = 0; i < 8; ++i) {
+        env->fptags[i] = !((twd >> i) & 1);
+    }
+    env->fpip = xsave->legacy.fpip;
+    env->fpdp = xsave->legacy.fpdp;
+    env->mxcsr = xsave->legacy.mxcsr;
+    memcpy(env->fpregs, &xsave->legacy.fpregs,
+            sizeof env->fpregs);
+    env->xstate_bv = xsave->header.xstate_bv;
+    memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
+            sizeof env->bnd_regs);
+    env->bndcs_regs = xsave->bndcsr_state.bndcsr;
+    memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
+            sizeof env->opmask_regs);
+
+    for (i = 0; i < CPU_NB_REGS; i++) {
+        const uint8_t *xmm = xsave->legacy.xmm_regs[i];
+        const uint8_t *ymmh = xsave->avx_state.ymmh[i];
+        const uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
+        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
+        env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
+        env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
+        env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
+        env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
+        env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
+        env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
+    }
+
+#ifdef TARGET_X86_64
+    memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
+            16 * sizeof env->xmm_regs[16]);
+    memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
+#endif
+
+}
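The new file factors the XSAVE-area packing out of kvm.c so the layout can be shared by other accelerators. A minimal round-trip sketch of how kvm_put_xsave()/kvm_get_xsave() now use the pair; the wrapper function below is hypothetical and the ioctl step is abbreviated (env->kvm_xsave_buf is allocated elsewhere):

/* Sketch: serialize guest FPU/SSE/AVX/MPX/PKRU state into the shared
 * X86XSaveArea layout, hand it to the hypervisor, then load it back. */
static int sync_xsave_example(X86CPU *cpu)      /* hypothetical */
{
    X86XSaveArea *xsave = cpu->env.kvm_xsave_buf;

    x86_cpu_xsave_all_areas(cpu, xsave);        /* env -> buffer */
    /* ... kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave) ... */
    x86_cpu_xrstor_all_areas(cpu, xsave);       /* buffer -> env */
    return 0;
}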
target/mips/kvm.c
@@ -523,7 +523,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
      * already saved and can be restored when it is synced back to KVM.
      */
     if (!running) {
-        if (!cs->kvm_vcpu_dirty) {
+        if (!cs->vcpu_dirty) {
             ret = kvm_mips_save_count(cs);
             if (ret < 0) {
                 fprintf(stderr, "Failed saving count\n");
@@ -539,7 +539,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
             return;
         }
 
-        if (!cs->kvm_vcpu_dirty) {
+        if (!cs->vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed restoring count\n");