i386: hvf: move all hvf files in the same directory

Just call it hvf/, no need for the "utils" suffix.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini 2017-10-03 15:33:29 +02:00
parent 3010460fb9
commit 69e0a03c3f
25 changed files with 21 additions and 21 deletions

target/i386/hvf/Makefile.objs (new file, 2 lines)

@@ -0,0 +1,2 @@
obj-y += hvf.o
obj-y += x86.o x86_cpuid.o x86_decode.o x86_descr.o x86_emu.o x86_flags.o x86_mmu.o x86hvf.o x86_task.o
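(These objects are unconditional within this directory; presumably the parent target/i386/Makefile.objs pulls the directory in with an obj-$(CONFIG_HVF) += hvf/ line, which this hunk does not show.)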

target/i386/hvf/README.md (new file, 7 lines)

@@ -0,0 +1,7 @@
# OS X Hypervisor.framework support in QEMU
These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desktop Hosted Hypervisor) (last known location: https://github.com/veertuinc/vdhh) with some minor changes, the most significant of which were:
1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, kvm_xsave_buf) due to historical differences and QEMU needing to handle more emulation targets.
2. Removal of `apic_page` and hyperv-related functionality.
3. More relaxed use of `qemu_mutex_lock_iothread`.
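(As a usage aside, not part of the file itself: once QEMU is built with Hypervisor.framework support on macOS, the accelerator is selected at run time, e.g. `qemu-system-x86_64 -machine accel=hvf`.)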

target/i386/hvf/hvf-i386.h (new file, 48 lines)

@@ -0,0 +1,48 @@
/*
* QEMU Hypervisor.framework (HVF) support
*
* Copyright 2017 Google Inc
*
* Adapted from target-i386/hax-i386.h:
* Copyright (c) 2011 Intel Corporation
* Written by:
* Jiang Yunhong<yunhong.jiang@intel.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef _HVF_I386_H
#define _HVF_I386_H
#include "sysemu/hvf.h"
#include "cpu.h"
#include "x86.h"
#define HVF_MAX_VCPU 0x10
#define MAX_VM_ID 0x40
#define MAX_VCPU_ID 0x40
extern struct hvf_state hvf_global;
struct hvf_vm {
int id;
struct hvf_vcpu_state *vcpus[HVF_MAX_VCPU];
};
struct hvf_state {
uint32_t version;
struct hvf_vm *vm;
uint64_t mem_quota;
};
#ifdef NEED_CPU_H
/* Functions exported to host specific mode */
/* Host specific functions */
int hvf_inject_interrupt(CPUArchState *env, int vector);
int hvf_vcpu_run(struct hvf_vcpu_state *vcpu);
#endif
#endif

target/i386/hvf/hvf.c (new file, 961 lines)

@@ -0,0 +1,961 @@
/* Copyright 2008 IBM Corporation
* 2008 Red Hat, Inc.
* Copyright 2011 Intel Corporation
* Copyright 2016 Veertu, Inc.
* Copyright 2017 The Android Open Source Project
*
* QEMU Hypervisor.framework support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "sysemu/hvf.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/ioport.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#include "qemu/main-loop.h"
#include "strings.h"
#include "sysemu/accel.h"
#include "sysemu/sysemu.h"
#include "target/i386/cpu.h"
pthread_rwlock_t mem_lock = PTHREAD_RWLOCK_INITIALIZER;
HVFState *hvf_state;
int hvf_disabled = 1;
static void assert_hvf_ok(hv_return_t ret)
{
if (ret == HV_SUCCESS) {
return;
}
switch (ret) {
case HV_ERROR:
error_report("Error: HV_ERROR");
break;
case HV_BUSY:
error_report("Error: HV_BUSY");
break;
case HV_BAD_ARGUMENT:
error_report("Error: HV_BAD_ARGUMENT");
break;
case HV_NO_RESOURCES:
error_report("Error: HV_NO_RESOURCES");
break;
case HV_NO_DEVICE:
error_report("Error: HV_NO_DEVICE");
break;
case HV_UNSUPPORTED:
error_report("Error: HV_UNSUPPORTED");
break;
default:
error_report("Unknown Error");
}
abort();
}
/* Memory slots */
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t end)
{
hvf_slot *slot;
int x;
for (x = 0; x < hvf_state->num_slots; ++x) {
slot = &hvf_state->slots[x];
if (slot->size && start < (slot->start + slot->size) &&
end > slot->start) {
return slot;
}
}
return NULL;
}
struct mac_slot {
int present;
uint64_t size;
uint64_t gpa_start;
uint64_t gva;
};
struct mac_slot mac_slots[32];
#define ALIGN(x, y) (((x) + (y) - 1) & ~((y) - 1))
static int do_hvf_set_memory(hvf_slot *slot)
{
struct mac_slot *macslot;
hv_memory_flags_t flags;
hv_return_t ret;
macslot = &mac_slots[slot->slot_id];
if (macslot->present) {
if (macslot->size != slot->size) {
macslot->present = 0;
ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
assert_hvf_ok(ret);
}
}
if (!slot->size) {
return 0;
}
flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
macslot->present = 1;
macslot->gpa_start = slot->start;
macslot->size = slot->size;
ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
assert_hvf_ok(ret);
return 0;
}
void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
hvf_slot *mem;
MemoryRegion *area = section->mr;
if (!memory_region_is_ram(area)) {
return;
}
mem = hvf_find_overlap_slot(
section->offset_within_address_space,
section->offset_within_address_space + int128_get64(section->size));
if (mem && add) {
if (mem->size == int128_get64(section->size) &&
mem->start == section->offset_within_address_space &&
mem->mem == (memory_region_get_ram_ptr(area) +
section->offset_within_region)) {
return; /* The same region is being registered again; nothing to do. */
}
}
/* Region needs to be reset: set the size to 0 and remap it. */
if (mem) {
mem->size = 0;
if (do_hvf_set_memory(mem)) {
error_report("Failed to reset overlapping slot\n");
abort();
}
}
if (!add) {
return;
}
/* Now make a new slot. */
int x;
for (x = 0; x < hvf_state->num_slots; ++x) {
mem = &hvf_state->slots[x];
if (!mem->size) {
break;
}
}
if (x == hvf_state->num_slots) {
error_report("No free slots\n");
abort();
}
mem->size = int128_get64(section->size);
mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
mem->start = section->offset_within_address_space;
mem->region = area;
if (do_hvf_set_memory(mem)) {
error_report("Error registering new memory slot\n");
abort();
}
}
void vmx_update_tpr(CPUState *cpu)
{
/* TODO: need to integrate APIC handling */
X86CPU *x86_cpu = X86_CPU(cpu);
int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);
wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
if (irr == -1) {
wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
} else {
wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
irr >> 4);
}
}
void update_apic_tpr(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}
#define VECTORING_INFO_VECTOR_MASK 0xff
static void hvf_handle_interrupt(CPUState *cpu, int mask)
{
cpu->interrupt_request |= mask;
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
}
}
void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
int direction, int size, int count)
{
int i;
uint8_t *ptr = buffer;
for (i = 0; i < count; i++) {
address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
ptr, size,
direction);
ptr += size;
}
}
/* TODO: synchronize vcpu state */
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
CPUState *cpu_state = cpu;
if (cpu_state->vcpu_dirty == 0) {
hvf_get_registers(cpu_state);
}
cpu_state->vcpu_dirty = 1;
}
void hvf_cpu_synchronize_state(CPUState *cpu_state)
{
if (cpu_state->vcpu_dirty == 0) {
run_on_cpu(cpu_state, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}
static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
CPUState *cpu_state = cpu;
hvf_put_registers(cpu_state);
cpu_state->vcpu_dirty = false;
}
void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)
{
run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}
void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
CPUState *cpu_state = cpu;
hvf_put_registers(cpu_state);
cpu_state->vcpu_dirty = false;
}
void hvf_cpu_synchronize_post_init(CPUState *cpu_state)
{
run_on_cpu(cpu_state, _hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
static bool ept_emulation_fault(hvf_slot *slot, addr_t gpa, uint64_t ept_qual)
{
int read, write;
/* EPT fault on an instruction fetch doesn't make sense here */
if (ept_qual & EPT_VIOLATION_INST_FETCH) {
return false;
}
/* EPT fault must be a read fault or a write fault */
read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
if ((read | write) == 0) {
return false;
}
if (write && slot) {
if (slot->flags & HVF_SLOT_LOG) {
memory_region_set_dirty(slot->region, gpa - slot->start, 1);
hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
HV_MEMORY_READ | HV_MEMORY_WRITE);
}
}
/*
* The EPT violation must have been caused by accessing a
* guest-physical address that is a translation of a guest-linear
* address.
*/
if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
(ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
return false;
}
return !slot;
}
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
hvf_slot *slot;
slot = hvf_find_overlap_slot(
section->offset_within_address_space,
section->offset_within_address_space + int128_get64(section->size));
/* protect region against writes; begin tracking it */
if (on) {
slot->flags |= HVF_SLOT_LOG;
hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
HV_MEMORY_READ);
/* stop tracking region */
} else {
slot->flags &= ~HVF_SLOT_LOG;
hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
HV_MEMORY_READ | HV_MEMORY_WRITE);
}
}
static void hvf_log_start(MemoryListener *listener,
MemoryRegionSection *section, int old, int new)
{
if (old != 0) {
return;
}
hvf_set_dirty_tracking(section, 1);
}
static void hvf_log_stop(MemoryListener *listener,
MemoryRegionSection *section, int old, int new)
{
if (new != 0) {
return;
}
hvf_set_dirty_tracking(section, 0);
}
static void hvf_log_sync(MemoryListener *listener,
MemoryRegionSection *section)
{
/*
* sync of dirty pages is handled elsewhere; just make sure we keep
* tracking the region.
*/
hvf_set_dirty_tracking(section, 1);
}
static void hvf_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
hvf_set_phys_mem(section, true);
}
static void hvf_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
hvf_set_phys_mem(section, false);
}
static MemoryListener hvf_memory_listener = {
.priority = 10,
.region_add = hvf_region_add,
.region_del = hvf_region_del,
.log_start = hvf_log_start,
.log_stop = hvf_log_stop,
.log_sync = hvf_log_sync,
};
void hvf_reset_vcpu(CPUState *cpu)
{
/* TODO: this shouldn't be needed; there is already a call to
* cpu_synchronize_all_post_reset in vl.c
*/
wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);
macvm_set_cr0(cpu->hvf_fd, 0x60000010);
wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);
wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK);
/* set VMCS guest state fields */
wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000);
wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff);
wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b);
wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000);
wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 0xffff);
wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93);
wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff);
wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93);
wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff);
wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93);
wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff);
wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93);
wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff);
wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93);
wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000);
wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83);
wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0);
/*wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);*/
wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0);
wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0);
wreg(cpu->hvf_fd, HV_X86_RDX, 0x623);
wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2);
wreg(cpu->hvf_fd, HV_X86_RSP, 0x0);
wreg(cpu->hvf_fd, HV_X86_RAX, 0x0);
wreg(cpu->hvf_fd, HV_X86_RBX, 0x0);
wreg(cpu->hvf_fd, HV_X86_RCX, 0x0);
wreg(cpu->hvf_fd, HV_X86_RSI, 0x0);
wreg(cpu->hvf_fd, HV_X86_RDI, 0x0);
wreg(cpu->hvf_fd, HV_X86_RBP, 0x0);
for (int i = 0; i < 8; i++) {
wreg(cpu->hvf_fd, HV_X86_R8 + i, 0x0);
}
hv_vm_sync_tsc(0);
cpu->halted = 0;
hv_vcpu_invalidate_tlb(cpu->hvf_fd);
hv_vcpu_flush(cpu->hvf_fd);
}
void hvf_vcpu_destroy(CPUState *cpu)
{
hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);
assert_hvf_ok(ret);
}
static void dummy_signal(int sig)
{
}
int hvf_init_vcpu(CPUState *cpu)
{
X86CPU *x86cpu = X86_CPU(cpu);
CPUX86State *env = &x86cpu->env;
int r;
/* init cpu signals */
sigset_t set;
struct sigaction sigact;
memset(&sigact, 0, sizeof(sigact));
sigact.sa_handler = dummy_signal;
sigaction(SIG_IPI, &sigact, NULL);
pthread_sigmask(SIG_BLOCK, NULL, &set);
sigdelset(&set, SIG_IPI);
init_emu();
init_decoder();
hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
env->hvf_emul = g_new0(HVFX86EmulatorState, 1);
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
cpu->vcpu_dirty = 1;
assert_hvf_ok(r);
if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
&hvf_state->hvf_caps->vmx_cap_pinbased)) {
abort();
}
if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
&hvf_state->hvf_caps->vmx_cap_procbased)) {
abort();
}
if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
&hvf_state->hvf_caps->vmx_cap_procbased2)) {
abort();
}
if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
&hvf_state->hvf_caps->vmx_cap_entry)) {
abort();
}
/* set VMCS control fields */
wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,
cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
VMCS_PIN_BASED_CTLS_EXTINT |
VMCS_PIN_BASED_CTLS_NMI |
VMCS_PIN_BASED_CTLS_VNMI));
wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
VMCS_PRI_PROC_BASED_CTLS_HLT |
VMCS_PRI_PROC_BASED_CTLS_MWAIT |
VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));
wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
0));
wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
hvf_reset_vcpu(cpu);
x86cpu = X86_CPU(cpu);
x86cpu->env.kvm_xsave_buf = qemu_memalign(4096, 4096);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
/*hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);*/
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);
return 0;
}
void hvf_disable(int shouldDisable)
{
hvf_disabled = shouldDisable;
}
static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
env->exception_injected = -1;
env->interrupt_injected = -1;
env->nmi_injected = false;
if (idtvec_info & VMCS_IDT_VEC_VALID) {
switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
case VMCS_IDT_VEC_HWINTR:
case VMCS_IDT_VEC_SWINTR:
env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
break;
case VMCS_IDT_VEC_NMI:
env->nmi_injected = true;
break;
case VMCS_IDT_VEC_HWEXCEPTION:
case VMCS_IDT_VEC_SWEXCEPTION:
env->exception_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
break;
case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
default:
abort();
}
if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
(idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
env->ins_len = ins_len;
}
if (idtvec_info & VMCS_INTR_DEL_ERRCODE) {
env->has_error_code = true;
env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);
}
}
if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
env->hflags2 |= HF2_NMI_MASK;
} else {
env->hflags2 &= ~HF2_NMI_MASK;
}
if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
env->hflags |= HF_INHIBIT_IRQ_MASK;
} else {
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
}
int hvf_vcpu_exec(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
int ret = 0;
uint64_t rip = 0;
cpu->halted = 0;
if (hvf_process_events(cpu)) {
return EXCP_HLT;
}
do {
if (cpu->vcpu_dirty) {
hvf_put_registers(cpu);
cpu->vcpu_dirty = false;
}
if (hvf_inject_interrupts(cpu)) {
return EXCP_INTERRUPT;
}
vmx_update_tpr(cpu);
qemu_mutex_unlock_iothread();
if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
qemu_mutex_lock_iothread();
return EXCP_HLT;
}
hv_return_t r = hv_vcpu_run(cpu->hvf_fd);
assert_hvf_ok(r);
/* handle VMEXIT */
uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,
VMCS_EXIT_INSTRUCTION_LENGTH);
uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
hvf_store_events(cpu, ins_len, idtvec_info);
rip = rreg(cpu->hvf_fd, HV_X86_RIP);
RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
env->eflags = RFLAGS(env);
qemu_mutex_lock_iothread();
update_apic_tpr(cpu);
current_cpu = cpu;
ret = 0;
switch (exit_reason) {
case EXIT_REASON_HLT: {
macvm_set_rip(cpu, rip + ins_len);
if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK))
&& !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
cpu->halted = 1;
ret = EXCP_HLT;
}
ret = EXCP_INTERRUPT;
break;
}
case EXIT_REASON_MWAIT: {
ret = EXCP_INTERRUPT;
break;
}
/* Need to check if MMIO or unmapped fault */
case EXIT_REASON_EPT_FAULT:
{
hvf_slot *slot;
addr_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);
if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
vmx_set_nmi_blocking(cpu);
}
slot = hvf_find_overlap_slot(gpa, gpa);
/* mmio */
if (ept_emulation_fault(slot, gpa, exit_qual)) {
struct x86_decode decode;
load_regs(cpu);
env->hvf_emul->fetch_rip = rip;
decode_instruction(env, &decode);
exec_instruction(env, &decode);
store_regs(cpu);
break;
}
break;
}
case EXIT_REASON_INOUT:
{
uint32_t in = (exit_qual & 8) != 0;
uint32_t size = (exit_qual & 7) + 1;
uint32_t string = (exit_qual & 16) != 0;
uint32_t port = exit_qual >> 16;
/*uint32_t rep = (exit_qual & 0x20) != 0;*/
#if 1
if (!string && in) {
uint64_t val = 0;
load_regs(cpu);
hvf_handle_io(env, port, &val, 0, size, 1);
if (size == 1) {
AL(env) = val;
} else if (size == 2) {
AX(env) = val;
} else if (size == 4) {
RAX(env) = (uint32_t)val;
} else {
VM_PANIC("size");
}
RIP(env) += ins_len;
store_regs(cpu);
break;
} else if (!string && !in) {
RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);
hvf_handle_io(env, port, &RAX(env), 1, size, 1);
macvm_set_rip(cpu, rip + ins_len);
break;
}
#endif
struct x86_decode decode;
load_regs(cpu);
env->hvf_emul->fetch_rip = rip;
decode_instruction(env, &decode);
VM_PANIC_ON(ins_len != decode.len);
exec_instruction(env, &decode);
store_regs(cpu);
break;
}
case EXIT_REASON_CPUID: {
uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);
uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);
wreg(cpu->hvf_fd, HV_X86_RAX, rax);
wreg(cpu->hvf_fd, HV_X86_RBX, rbx);
wreg(cpu->hvf_fd, HV_X86_RCX, rcx);
wreg(cpu->hvf_fd, HV_X86_RDX, rdx);
macvm_set_rip(cpu, rip + ins_len);
break;
}
case EXIT_REASON_XSETBV: {
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
if (ecx) {
macvm_set_rip(cpu, rip + ins_len);
break;
}
env->xcr0 = ((uint64_t)edx << 32) | eax;
wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);
macvm_set_rip(cpu, rip + ins_len);
break;
}
case EXIT_REASON_INTR_WINDOW:
vmx_clear_int_window_exiting(cpu);
ret = EXCP_INTERRUPT;
break;
case EXIT_REASON_NMI_WINDOW:
vmx_clear_nmi_window_exiting(cpu);
ret = EXCP_INTERRUPT;
break;
case EXIT_REASON_EXT_INTR:
/* force exit and allow io handling */
ret = EXCP_INTERRUPT;
break;
case EXIT_REASON_RDMSR:
case EXIT_REASON_WRMSR:
{
load_regs(cpu);
if (exit_reason == EXIT_REASON_RDMSR) {
simulate_rdmsr(cpu);
} else {
simulate_wrmsr(cpu);
}
RIP(env) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
store_regs(cpu);
break;
}
case EXIT_REASON_CR_ACCESS: {
int cr;
int reg;
load_regs(cpu);
cr = exit_qual & 15;
reg = (exit_qual >> 8) & 15;
switch (cr) {
case 0x0: {
macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));
break;
}
case 4: {
macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));
break;
}
case 8: {
X86CPU *x86_cpu = X86_CPU(cpu);
if (exit_qual & 0x10) {
RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
} else {
int tpr = RRX(env, reg);
cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
ret = EXCP_INTERRUPT;
}
break;
}
default:
error_report("Unrecognized CR %d\n", cr);
abort();
}
RIP(env) += ins_len;
store_regs(cpu);
break;
}
case EXIT_REASON_APIC_ACCESS: { /* TODO */
struct x86_decode decode;
load_regs(cpu);
env->hvf_emul->fetch_rip = rip;
decode_instruction(env, &decode);
exec_instruction(env, &decode);
store_regs(cpu);
break;
}
case EXIT_REASON_TPR: {
ret = 1;
break;
}
case EXIT_REASON_TASK_SWITCH: {
uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
x68_segment_selector sel = {.sel = exit_qual & 0xffff};
vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
& VMCS_INTR_T_MASK);
break;
}
case EXIT_REASON_TRIPLE_FAULT: {
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
break;
}
case EXIT_REASON_RDPMC:
wreg(cpu->hvf_fd, HV_X86_RAX, 0);
wreg(cpu->hvf_fd, HV_X86_RDX, 0);
macvm_set_rip(cpu, rip + ins_len);
break;
case VMX_REASON_VMCALL:
env->exception_injected = EXCP0D_GPF;
env->has_error_code = true;
env->error_code = 0;
break;
default:
error_report("%llx: unhandled exit %llx\n", rip, exit_reason);
}
} while (ret == 0);
return ret;
}
static bool hvf_allowed;
static int hvf_accel_init(MachineState *ms)
{
int x;
hv_return_t ret;
HVFState *s;
hvf_disable(0);
ret = hv_vm_create(HV_VM_DEFAULT);
assert_hvf_ok(ret);
s = g_new0(HVFState, 1);
s->num_slots = 32;
for (x = 0; x < s->num_slots; ++x) {
s->slots[x].size = 0;
s->slots[x].slot_id = x;
}
hvf_state = s;
cpu_interrupt_handler = hvf_handle_interrupt;
memory_listener_register(&hvf_memory_listener, &address_space_memory);
return 0;
}
static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "HVF";
ac->init_machine = hvf_accel_init;
ac->allowed = &hvf_allowed;
}
static const TypeInfo hvf_accel_type = {
.name = TYPE_HVF_ACCEL,
.parent = TYPE_ACCEL,
.class_init = hvf_accel_class_init,
};
static void hvf_type_init(void)
{
type_register_static(&hvf_accel_type);
}
type_init(hvf_type_init);
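The slot lookup in hvf_find_overlap_slot() above is a standard half-open interval intersection test. A minimal standalone sketch of just that predicate (illustrative only, not part of the commit):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* [start, end) intersects [s, s + size) iff start < s + size && end > s;
 * empty slots (size == 0) never match, as in hvf_find_overlap_slot(). */
static bool ranges_overlap(uint64_t start, uint64_t end,
                           uint64_t s, uint64_t size)
{
    return size && start < s + size && end > s;
}

int main(void)
{
    assert(ranges_overlap(0x1000, 0x2000, 0x1800, 0x1000));  /* partial overlap */
    assert(!ranges_overlap(0x1000, 0x2000, 0x2000, 0x1000)); /* merely adjacent */
    return 0;
}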

target/i386/hvf/vmcs.h (new file, 374 lines)

@@ -0,0 +1,374 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VMCS_H_
#define _VMCS_H_
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#define VMCS_INITIAL 0xffffffffffffffff
#define VMCS_IDENT(encoding) ((encoding) | 0x80000000)
/*
* VMCS field encodings from Appendix H, Intel Architecture Manual Vol3B.
*/
#define VMCS_INVALID_ENCODING 0xffffffff
/* 16-bit control fields */
#define VMCS_VPID 0x00000000
#define VMCS_PIR_VECTOR 0x00000002
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR 0x00000800
#define VMCS_GUEST_CS_SELECTOR 0x00000802
#define VMCS_GUEST_SS_SELECTOR 0x00000804
#define VMCS_GUEST_DS_SELECTOR 0x00000806
#define VMCS_GUEST_FS_SELECTOR 0x00000808
#define VMCS_GUEST_GS_SELECTOR 0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR 0x0000080C
#define VMCS_GUEST_TR_SELECTOR 0x0000080E
#define VMCS_GUEST_INTR_STATUS 0x00000810
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR 0x00000C00
#define VMCS_HOST_CS_SELECTOR 0x00000C02
#define VMCS_HOST_SS_SELECTOR 0x00000C04
#define VMCS_HOST_DS_SELECTOR 0x00000C06
#define VMCS_HOST_FS_SELECTOR 0x00000C08
#define VMCS_HOST_GS_SELECTOR 0x00000C0A
#define VMCS_HOST_TR_SELECTOR 0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A 0x00002000
#define VMCS_IO_BITMAP_B 0x00002002
#define VMCS_MSR_BITMAP 0x00002004
#define VMCS_EXIT_MSR_STORE 0x00002006
#define VMCS_EXIT_MSR_LOAD 0x00002008
#define VMCS_ENTRY_MSR_LOAD 0x0000200A
#define VMCS_EXECUTIVE_VMCS 0x0000200C
#define VMCS_TSC_OFFSET 0x00002010
#define VMCS_VIRTUAL_APIC 0x00002012
#define VMCS_APIC_ACCESS 0x00002014
#define VMCS_PIR_DESC 0x00002016
#define VMCS_EPTP 0x0000201A
#define VMCS_EOI_EXIT0 0x0000201C
#define VMCS_EOI_EXIT1 0x0000201E
#define VMCS_EOI_EXIT2 0x00002020
#define VMCS_EOI_EXIT3 0x00002022
#define VMCS_EOI_EXIT(vector) (VMCS_EOI_EXIT0 + ((vector) / 64) * 2)
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER 0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL 0x00002802
#define VMCS_GUEST_IA32_PAT 0x00002804
#define VMCS_GUEST_IA32_EFER 0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808
#define VMCS_GUEST_PDPTE0 0x0000280A
#define VMCS_GUEST_PDPTE1 0x0000280C
#define VMCS_GUEST_PDPTE2 0x0000280E
#define VMCS_GUEST_PDPTE3 0x00002810
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT 0x00002C00
#define VMCS_HOST_IA32_EFER 0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL 0x00002C04
/* 32-bit control fields */
#define VMCS_PIN_BASED_CTLS 0x00004000
#define VMCS_PRI_PROC_BASED_CTLS 0x00004002
#define VMCS_EXCEPTION_BITMAP 0x00004004
#define VMCS_PF_ERROR_MASK 0x00004006
#define VMCS_PF_ERROR_MATCH 0x00004008
#define VMCS_CR3_TARGET_COUNT 0x0000400A
#define VMCS_EXIT_CTLS 0x0000400C
#define VMCS_EXIT_MSR_STORE_COUNT 0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT 0x00004010
#define VMCS_ENTRY_CTLS 0x00004012
#define VMCS_ENTRY_MSR_LOAD_COUNT 0x00004014
#define VMCS_ENTRY_INTR_INFO 0x00004016
#define VMCS_ENTRY_EXCEPTION_ERROR 0x00004018
#define VMCS_ENTRY_INST_LENGTH 0x0000401A
#define VMCS_TPR_THRESHOLD 0x0000401C
#define VMCS_SEC_PROC_BASED_CTLS 0x0000401E
#define VMCS_PLE_GAP 0x00004020
#define VMCS_PLE_WINDOW 0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR 0x00004400
#define VMCS_EXIT_REASON 0x00004402
#define VMCS_EXIT_INTR_INFO 0x00004404
#define VMCS_EXIT_INTR_ERRCODE 0x00004406
#define VMCS_IDT_VECTORING_INFO 0x00004408
#define VMCS_IDT_VECTORING_ERROR 0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH 0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO 0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT 0x00004800
#define VMCS_GUEST_CS_LIMIT 0x00004802
#define VMCS_GUEST_SS_LIMIT 0x00004804
#define VMCS_GUEST_DS_LIMIT 0x00004806
#define VMCS_GUEST_FS_LIMIT 0x00004808
#define VMCS_GUEST_GS_LIMIT 0x0000480A
#define VMCS_GUEST_LDTR_LIMIT 0x0000480C
#define VMCS_GUEST_TR_LIMIT 0x0000480E
#define VMCS_GUEST_GDTR_LIMIT 0x00004810
#define VMCS_GUEST_IDTR_LIMIT 0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS 0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS 0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS 0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS 0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS 0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS 0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS 0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY 0x00004824
#define VMCS_GUEST_ACTIVITY 0x00004826
#define VMCS_GUEST_SMBASE 0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS 0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE 0x0000482E
/* 32-bit host state fields */
#define VMCS_HOST_IA32_SYSENTER_CS 0x00004C00
/* Natural Width control fields */
#define VMCS_CR0_MASK 0x00006000
#define VMCS_CR4_MASK 0x00006002
#define VMCS_CR0_SHADOW 0x00006004
#define VMCS_CR4_SHADOW 0x00006006
#define VMCS_CR3_TARGET0 0x00006008
#define VMCS_CR3_TARGET1 0x0000600A
#define VMCS_CR3_TARGET2 0x0000600C
#define VMCS_CR3_TARGET3 0x0000600E
/* Natural Width read-only fields */
#define VMCS_EXIT_QUALIFICATION 0x00006400
#define VMCS_IO_RCX 0x00006402
#define VMCS_IO_RSI 0x00006404
#define VMCS_IO_RDI 0x00006406
#define VMCS_IO_RIP 0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS 0x0000640A
/* Natural Width guest-state fields */
#define VMCS_GUEST_CR0 0x00006800
#define VMCS_GUEST_CR3 0x00006802
#define VMCS_GUEST_CR4 0x00006804
#define VMCS_GUEST_ES_BASE 0x00006806
#define VMCS_GUEST_CS_BASE 0x00006808
#define VMCS_GUEST_SS_BASE 0x0000680A
#define VMCS_GUEST_DS_BASE 0x0000680C
#define VMCS_GUEST_FS_BASE 0x0000680E
#define VMCS_GUEST_GS_BASE 0x00006810
#define VMCS_GUEST_LDTR_BASE 0x00006812
#define VMCS_GUEST_TR_BASE 0x00006814
#define VMCS_GUEST_GDTR_BASE 0x00006816
#define VMCS_GUEST_IDTR_BASE 0x00006818
#define VMCS_GUEST_DR7 0x0000681A
#define VMCS_GUEST_RSP 0x0000681C
#define VMCS_GUEST_RIP 0x0000681E
#define VMCS_GUEST_RFLAGS 0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP 0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP 0x00006826
/* Natural Width host-state fields */
#define VMCS_HOST_CR0 0x00006C00
#define VMCS_HOST_CR3 0x00006C02
#define VMCS_HOST_CR4 0x00006C04
#define VMCS_HOST_FS_BASE 0x00006C06
#define VMCS_HOST_GS_BASE 0x00006C08
#define VMCS_HOST_TR_BASE 0x00006C0A
#define VMCS_HOST_GDTR_BASE 0x00006C0C
#define VMCS_HOST_IDTR_BASE 0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP 0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP 0x00006C12
#define VMCS_HOST_RSP 0x00006C14
#define VMCS_HOST_RIP 0x00006c16
/*
* VM instruction error numbers
*/
#define VMRESUME_WITH_NON_LAUNCHED_VMCS 5
/*
* VMCS exit reasons
*/
#define EXIT_REASON_EXCEPTION 0
#define EXIT_REASON_EXT_INTR 1
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_INIT 3
#define EXIT_REASON_SIPI 4
#define EXIT_REASON_IO_SMI 5
#define EXIT_REASON_SMI 6
#define EXIT_REASON_INTR_WINDOW 7
#define EXIT_REASON_NMI_WINDOW 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
#define EXIT_REASON_GETSEC 11
#define EXIT_REASON_HLT 12
#define EXIT_REASON_INVD 13
#define EXIT_REASON_INVLPG 14
#define EXIT_REASON_RDPMC 15
#define EXIT_REASON_RDTSC 16
#define EXIT_REASON_RSM 17
#define EXIT_REASON_VMCALL 18
#define EXIT_REASON_VMCLEAR 19
#define EXIT_REASON_VMLAUNCH 20
#define EXIT_REASON_VMPTRLD 21
#define EXIT_REASON_VMPTRST 22
#define EXIT_REASON_VMREAD 23
#define EXIT_REASON_VMRESUME 24
#define EXIT_REASON_VMWRITE 25
#define EXIT_REASON_VMXOFF 26
#define EXIT_REASON_VMXON 27
#define EXIT_REASON_CR_ACCESS 28
#define EXIT_REASON_DR_ACCESS 29
#define EXIT_REASON_INOUT 30
#define EXIT_REASON_RDMSR 31
#define EXIT_REASON_WRMSR 32
#define EXIT_REASON_INVAL_VMCS 33
#define EXIT_REASON_INVAL_MSR 34
#define EXIT_REASON_MWAIT 36
#define EXIT_REASON_MTF 37
#define EXIT_REASON_MONITOR 39
#define EXIT_REASON_PAUSE 40
#define EXIT_REASON_MCE_DURING_ENTR 41
#define EXIT_REASON_TPR 43
#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_VIRTUALIZED_EOI 45
#define EXIT_REASON_GDTR_IDTR 46
#define EXIT_REASON_LDTR_TR 47
#define EXIT_REASON_EPT_FAULT 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
#define EXIT_REASON_RDTSCP 51
#define EXIT_REASON_VMX_PREEMPT 52
#define EXIT_REASON_INVVPID 53
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
/*
* NMI unblocking due to IRET.
*
* Applies to VM-exits due to hardware exception or EPT fault.
*/
#define EXIT_QUAL_NMIUDTI (1 << 12)
/*
* VMCS interrupt information fields
*/
#define VMCS_INTR_VALID (1U << 31)
#define VMCS_INTR_T_MASK 0x700 /* Interruption-info type */
#define VMCS_INTR_T_HWINTR (0 << 8)
#define VMCS_INTR_T_NMI (2 << 8)
#define VMCS_INTR_T_HWEXCEPTION (3 << 8)
#define VMCS_INTR_T_SWINTR (4 << 8)
#define VMCS_INTR_T_PRIV_SWEXCEPTION (5 << 8)
#define VMCS_INTR_T_SWEXCEPTION (6 << 8)
#define VMCS_INTR_DEL_ERRCODE (1 << 11)
/*
* VMCS IDT-Vectoring information fields
*/
#define VMCS_IDT_VEC_VECNUM 0xFF
#define VMCS_IDT_VEC_VALID (1U << 31)
#define VMCS_IDT_VEC_TYPE 0x700
#define VMCS_IDT_VEC_ERRCODE_VALID (1U << 11)
#define VMCS_IDT_VEC_HWINTR (0 << 8)
#define VMCS_IDT_VEC_NMI (2 << 8)
#define VMCS_IDT_VEC_HWEXCEPTION (3 << 8)
#define VMCS_IDT_VEC_SWINTR (4 << 8)
#define VMCS_IDT_VEC_PRIV_SWEXCEPTION (5 << 8)
#define VMCS_IDT_VEC_SWEXCEPTION (6 << 8)
/*
* VMCS Guest interruptibility field
*/
#define VMCS_INTERRUPTIBILITY_STI_BLOCKING (1 << 0)
#define VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING (1 << 1)
#define VMCS_INTERRUPTIBILITY_SMI_BLOCKING (1 << 2)
#define VMCS_INTERRUPTIBILITY_NMI_BLOCKING (1 << 3)
/*
* Exit qualification for EXIT_REASON_INVAL_VMCS
*/
#define EXIT_QUAL_NMI_WHILE_STI_BLOCKING 3
/*
* Exit qualification for EPT violation
*/
#define EPT_VIOLATION_DATA_READ (1UL << 0)
#define EPT_VIOLATION_DATA_WRITE (1UL << 1)
#define EPT_VIOLATION_INST_FETCH (1UL << 2)
#define EPT_VIOLATION_GPA_READABLE (1UL << 3)
#define EPT_VIOLATION_GPA_WRITEABLE (1UL << 4)
#define EPT_VIOLATION_GPA_EXECUTABLE (1UL << 5)
#define EPT_VIOLATION_GLA_VALID (1UL << 7)
#define EPT_VIOLATION_XLAT_VALID (1UL << 8)
/*
* Exit qualification for APIC-access VM exit
*/
#define APIC_ACCESS_OFFSET(qual) ((qual) & 0xFFF)
#define APIC_ACCESS_TYPE(qual) (((qual) >> 12) & 0xF)
/*
* Exit qualification for APIC-write VM exit
*/
#define APIC_WRITE_OFFSET(qual) ((qual) & 0xFFF)
#define VMCS_PIN_BASED_CTLS_EXTINT (1 << 0)
#define VMCS_PIN_BASED_CTLS_NMI (1 << 3)
#define VMCS_PIN_BASED_CTLS_VNMI (1 << 5)
#define VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING (1 << 2)
#define VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET (1 << 3)
#define VMCS_PRI_PROC_BASED_CTLS_HLT (1 << 7)
#define VMCS_PRI_PROC_BASED_CTLS_MWAIT (1 << 10)
#define VMCS_PRI_PROC_BASED_CTLS_TSC (1 << 12)
#define VMCS_PRI_PROC_BASED_CTLS_CR8_LOAD (1 << 19)
#define VMCS_PRI_PROC_BASED_CTLS_CR8_STORE (1 << 20)
#define VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW (1 << 21)
#define VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING (1 << 22)
#define VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL (1 << 31)
#define VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES (1 << 0)
#define VMCS_PRI_PROC_BASED2_CTLS_X2APIC (1 << 4)
enum task_switch_reason {
TSR_CALL,
TSR_IRET,
TSR_JMP,
TSR_IDT_GATE, /* task gate in IDT */
};
#endif
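The encodings above follow the packing described in the Intel SDM (Vol. 3, Appendix B): bits 14:13 of an encoding give the field width and bits 11:10 its type. A small decoder, for illustration only:

#include <stdint.h>
#include <stdio.h>

static const char *vmcs_width(uint32_t enc)
{
    static const char *w[] = { "16-bit", "64-bit", "32-bit", "natural" };
    return w[(enc >> 13) & 3];
}

static const char *vmcs_type(uint32_t enc)
{
    static const char *t[] = { "control", "read-only", "guest-state",
                               "host-state" };
    return t[(enc >> 10) & 3];
}

int main(void)
{
    printf("%s %s\n", vmcs_width(0x0802), vmcs_type(0x0802)); /* CS selector: 16-bit guest-state */
    printf("%s %s\n", vmcs_width(0x4402), vmcs_type(0x4402)); /* exit reason: 32-bit read-only */
    return 0;
}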

target/i386/hvf/vmx.h (new file, 222 lines)

@@ -0,0 +1,222 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
* Based on Veertu vddh/vmm/vmx.h
*
* Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef VMX_H
#define VMX_H
#include <stdint.h>
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include "vmcs.h"
#include "cpu.h"
#include "x86.h"
#include "exec/address-spaces.h"
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
uint64_t v;
if (hv_vcpu_read_register(vcpu, reg, &v)) {
abort();
}
return v;
}
/* write GPR */
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
{
if (hv_vcpu_write_register(vcpu, reg, v)) {
abort();
}
}
/* read VMCS field */
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
{
uint64_t v;
hv_vmx_vcpu_read_vmcs(vcpu, field, &v);
return v;
}
/* write VMCS field */
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
{
hv_vmx_vcpu_write_vmcs(vcpu, field, v);
}
/* desired control word constrained by hardware/hypervisor capabilities */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}
#define VM_ENTRY_GUEST_LMA (1LL << 9)
#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
#define AR_TYPE_LDT 2
static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
uint64_t entry_ctls;
efer |= EFER_LMA;
wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);
uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
if ((efer & EFER_LME) &&
(guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
(guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
}
}
static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
uint64_t entry_ctls;
entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
efer &= ~EFER_LMA;
wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
}
static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
int i;
uint64_t pdpte[4] = {0, 0, 0, 0};
uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
!(efer & EFER_LME)) {
address_space_rw(&address_space_memory,
rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
MEMTXATTRS_UNSPECIFIED,
(uint8_t *)pdpte, 32, 0);
}
for (i = 0; i < 4; i++) {
wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
}
wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);
wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);
cr0 &= ~CR0_CD;
wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
if (efer & EFER_LME) {
if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
enter_long_mode(vcpu, cr0, efer);
}
if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {
exit_long_mode(vcpu, cr0, efer);
}
}
hv_vcpu_invalidate_tlb(vcpu);
hv_vcpu_flush(vcpu);
}
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
uint64_t guest_cr4 = cr4 | CR4_VMXE;
wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
hv_vcpu_invalidate_tlb(vcpu);
hv_vcpu_flush(vcpu);
}
static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
uint64_t val;
/* BUG: should take overlap into account */
wreg(cpu->hvf_fd, HV_X86_RIP, rip);
/* after moving forward in rip, we need to clear the interruptibility state */
val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
}
}
static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
env->hflags2 &= ~HF2_NMI_MASK;
uint32_t gi = (uint32_t) rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}
static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
env->hflags2 |= HF2_NMI_MASK;
uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
uint64_t val;
val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}
static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
uint64_t val;
val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}
#endif
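cap2ctrl() above encodes the VMX capability-MSR convention: the low 32 bits of the capability word are control bits the hypervisor forces to 1, and the high 32 bits are the bits it allows to be 1. A worked example with made-up capability bits:

#include <assert.h>
#include <stdint.h>

static uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}

int main(void)
{
    /* forced-to-1: bit 1; allowed-to-1: bits 0, 1 and 7 */
    uint64_t cap = ((uint64_t)0x83 << 32) | 0x02;
    /* request bits 0 and 3: bit 3 is dropped (not allowed),
     * bit 1 is added (forced), bit 0 survives */
    assert(cap2ctrl(cap, 0x09) == 0x03);
    return 0;
}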

target/i386/hvf/x86.c (new file, 184 lines)

@@ -0,0 +1,184 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86_mmu.h"
#include "x86_descr.h"
/* static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)
{
uint32_t ar;
if (!var->p) {
ar = 1 << 16;
return ar;
}
ar = var->type & 15;
ar |= (var->s & 1) << 4;
ar |= (var->dpl & 3) << 5;
ar |= (var->p & 1) << 7;
ar |= (var->avl & 1) << 12;
ar |= (var->l & 1) << 13;
ar |= (var->db & 1) << 14;
ar |= (var->g & 1) << 15;
return ar;
}*/
bool x86_read_segment_descriptor(struct CPUState *cpu,
struct x86_segment_descriptor *desc,
x68_segment_selector sel)
{
addr_t base;
uint32_t limit;
ZERO_INIT(*desc);
/* valid gdt descriptors start from index 1 */
if (!sel.index && GDT_SEL == sel.ti) {
return false;
}
if (GDT_SEL == sel.ti) {
base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
} else {
base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
}
if (sel.index * 8 >= limit) {
return false;
}
vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));
return true;
}
bool x86_write_segment_descriptor(struct CPUState *cpu,
struct x86_segment_descriptor *desc,
x68_segment_selector sel)
{
addr_t base;
uint32_t limit;
if (GDT_SEL == sel.ti) {
base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
} else {
base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
}
if (sel.index * 8 >= limit) {
printf("%s: gdt limit\n", __func__);
return false;
}
vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));
return true;
}
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
int gate)
{
addr_t base = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);
uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
ZERO_INIT(*idt_desc);
if (gate * 8 >= limit) {
printf("%s: idt limit\n", __func__);
return false;
}
vmx_read_mem(cpu, idt_desc, base + gate * 8, sizeof(*idt_desc));
return true;
}
bool x86_is_protected(struct CPUState *cpu)
{
uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
return cr0 & CR0_PE;
}
bool x86_is_real(struct CPUState *cpu)
{
return !x86_is_protected(cpu);
}
bool x86_is_v8086(struct CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
return x86_is_protected(cpu) && (RFLAGS(env) & RFLAGS_VM);
}
bool x86_is_long_mode(struct CPUState *cpu)
{
return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & EFER_LMA;
}
bool x86_is_long64_mode(struct CPUState *cpu)
{
struct vmx_segment desc;
vmx_read_segment_descriptor(cpu, &desc, REG_SEG_CS);
return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1);
}
bool x86_is_paging_mode(struct CPUState *cpu)
{
uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
return cr0 & CR0_PG;
}
bool x86_is_pae_enabled(struct CPUState *cpu)
{
uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
return cr4 & CR4_PAE;
}
addr_t linear_addr(struct CPUState *cpu, addr_t addr, x86_reg_segment seg)
{
return vmx_read_segment_base(cpu, seg) + addr;
}
addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size,
x86_reg_segment seg)
{
switch (size) {
case 2:
addr = (uint16_t)addr;
break;
case 4:
addr = (uint32_t)addr;
break;
default:
break;
}
return linear_addr(cpu, addr, seg);
}
addr_t linear_rip(struct CPUState *cpu, addr_t rip)
{
return linear_addr(cpu, rip, REG_SEG_CS);
}
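linear_addr_size() above truncates the effective address to the operand/address size before adding the segment base, which is what produces the classic 16-bit wrap-around. A minimal model of that arithmetic (the segment base is a made-up value):

#include <assert.h>
#include <stdint.h>

static uint64_t linear(uint64_t seg_base, uint64_t addr, int size)
{
    if (size == 2) {
        addr = (uint16_t)addr;   /* 16-bit effective address wraps */
    } else if (size == 4) {
        addr = (uint32_t)addr;   /* 32-bit effective address wraps */
    }
    return seg_base + addr;
}

int main(void)
{
    /* 0x10005 truncates to 0x0005 before the base is added */
    assert(linear(0x1000, 0x10005, 2) == 0x1005);
    return 0;
}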

target/i386/hvf/x86.h (new file, 469 lines)

@@ -0,0 +1,469 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Veertu Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>
#include "qemu-common.h"
#include "x86_gen.h"
/* exceptions */
typedef enum x86_exception {
EXCEPTION_DE, /* divide error */
EXCEPTION_DB, /* debug fault */
EXCEPTION_NMI, /* non-maskable interrupt */
EXCEPTION_BP, /* breakpoint trap */
EXCEPTION_OF, /* overflow trap */
EXCEPTION_BR, /* boundary range exceeded fault */
EXCEPTION_UD, /* undefined opcode */
EXCEPTION_NM, /* device not available */
EXCEPTION_DF, /* double fault */
EXCEPTION_RSVD, /* not defined */
EXCEPTION_TS, /* invalid TSS fault */
EXCEPTION_NP, /* not present fault */
EXCEPTION_GP, /* general protection fault */
EXCEPTION_PF, /* page fault */
EXCEPTION_RSVD2, /* not defined */
} x86_exception;
/* general purpose regs */
typedef enum x86_reg_name {
REG_RAX = 0,
REG_RCX = 1,
REG_RDX = 2,
REG_RBX = 3,
REG_RSP = 4,
REG_RBP = 5,
REG_RSI = 6,
REG_RDI = 7,
REG_R8 = 8,
REG_R9 = 9,
REG_R10 = 10,
REG_R11 = 11,
REG_R12 = 12,
REG_R13 = 13,
REG_R14 = 14,
REG_R15 = 15,
} x86_reg_name;
/* segment regs */
typedef enum x86_reg_segment {
REG_SEG_ES = 0,
REG_SEG_CS = 1,
REG_SEG_SS = 2,
REG_SEG_DS = 3,
REG_SEG_FS = 4,
REG_SEG_GS = 5,
REG_SEG_LDTR = 6,
REG_SEG_TR = 7,
} x86_reg_segment;
typedef struct x86_register {
union {
struct {
uint64_t rrx; /* full 64 bit */
};
struct {
uint32_t erx; /* low 32 bit part */
uint32_t hi32_unused1;
};
struct {
uint16_t rx; /* low 16 bit part */
uint16_t hi16_unused1;
uint32_t hi32_unused2;
};
struct {
uint8_t lx; /* low 8 bit part */
uint8_t hx; /* high 8 bit */
uint16_t hi16_unused2;
uint32_t hi32_unused3;
};
};
} __attribute__ ((__packed__)) x86_register;
typedef enum x86_rflags {
RFLAGS_CF = (1L << 0),
RFLAGS_PF = (1L << 2),
RFLAGS_AF = (1L << 4),
RFLAGS_ZF = (1L << 6),
RFLAGS_SF = (1L << 7),
RFLAGS_TF = (1L << 8),
RFLAGS_IF = (1L << 9),
RFLAGS_DF = (1L << 10),
RFLAGS_OF = (1L << 11),
RFLAGS_IOPL = (3L << 12),
RFLAGS_NT = (1L << 14),
RFLAGS_RF = (1L << 16),
RFLAGS_VM = (1L << 17),
RFLAGS_AC = (1L << 18),
RFLAGS_VIF = (1L << 19),
RFLAGS_VIP = (1L << 20),
RFLAGS_ID = (1L << 21),
} x86_rflags;
/* rflags register */
typedef struct x86_reg_flags {
union {
struct {
uint64_t rflags;
};
struct {
uint32_t eflags;
uint32_t hi32_unused1;
};
struct {
uint32_t cf:1;
uint32_t unused1:1;
uint32_t pf:1;
uint32_t unused2:1;
uint32_t af:1;
uint32_t unused3:1;
uint32_t zf:1;
uint32_t sf:1;
uint32_t tf:1;
uint32_t ief:1;
uint32_t df:1;
uint32_t of:1;
uint32_t iopl:2;
uint32_t nt:1;
uint32_t unused4:1;
uint32_t rf:1;
uint32_t vm:1;
uint32_t ac:1;
uint32_t vif:1;
uint32_t vip:1;
uint32_t id:1;
uint32_t unused5:10;
uint32_t hi32_unused2;
};
};
} __attribute__ ((__packed__)) x86_reg_flags;
typedef enum x86_reg_efer {
EFER_SCE = (1L << 0),
EFER_LME = (1L << 8),
EFER_LMA = (1L << 10),
EFER_NXE = (1L << 11),
EFER_SVME = (1L << 12),
EFER_FXSR = (1L << 14),
} x86_reg_efer;
typedef struct x86_efer {
uint64_t efer;
} __attribute__ ((__packed__)) x86_efer;
typedef enum x86_reg_cr0 {
CR0_PE = (1L << 0),
CR0_MP = (1L << 1),
CR0_EM = (1L << 2),
CR0_TS = (1L << 3),
CR0_ET = (1L << 4),
CR0_NE = (1L << 5),
CR0_WP = (1L << 16),
CR0_AM = (1L << 18),
CR0_NW = (1L << 29),
CR0_CD = (1L << 30),
CR0_PG = (1L << 31),
} x86_reg_cr0;
typedef enum x86_reg_cr4 {
CR4_VME = (1L << 0),
CR4_PVI = (1L << 1),
CR4_TSD = (1L << 2),
CR4_DE = (1L << 3),
CR4_PSE = (1L << 4),
CR4_PAE = (1L << 5),
CR4_MSE = (1L << 6),
CR4_PGE = (1L << 7),
CR4_PCE = (1L << 8),
CR4_OSFXSR = (1L << 9),
CR4_OSXMMEXCPT = (1L << 10),
CR4_VMXE = (1L << 13),
CR4_SMXE = (1L << 14),
CR4_FSGSBASE = (1L << 16),
CR4_PCIDE = (1L << 17),
CR4_OSXSAVE = (1L << 18),
CR4_SMEP = (1L << 20),
} x86_reg_cr4;
/* 16 bit Task State Segment */
typedef struct x86_tss_segment16 {
uint16_t link;
uint16_t sp0;
uint16_t ss0;
uint32_t sp1;
uint16_t ss1;
uint32_t sp2;
uint16_t ss2;
uint16_t ip;
uint16_t flags;
uint16_t ax;
uint16_t cx;
uint16_t dx;
uint16_t bx;
uint16_t sp;
uint16_t bp;
uint16_t si;
uint16_t di;
uint16_t es;
uint16_t cs;
uint16_t ss;
uint16_t ds;
uint16_t ldtr;
} __attribute__((packed)) x86_tss_segment16;
/* 32 bit Task State Segment */
typedef struct x86_tss_segment32 {
uint32_t prev_tss;
uint32_t esp0;
uint32_t ss0;
uint32_t esp1;
uint32_t ss1;
uint32_t esp2;
uint32_t ss2;
uint32_t cr3;
uint32_t eip;
uint32_t eflags;
uint32_t eax;
uint32_t ecx;
uint32_t edx;
uint32_t ebx;
uint32_t esp;
uint32_t ebp;
uint32_t esi;
uint32_t edi;
uint32_t es;
uint32_t cs;
uint32_t ss;
uint32_t ds;
uint32_t fs;
uint32_t gs;
uint32_t ldt;
uint16_t trap;
uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment32;
/* 64 bit Task State Segment */
typedef struct x86_tss_segment64 {
uint32_t unused;
uint64_t rsp0;
uint64_t rsp1;
uint64_t rsp2;
uint64_t unused1;
uint64_t ist1;
uint64_t ist2;
uint64_t ist3;
uint64_t ist4;
uint64_t ist5;
uint64_t ist6;
uint64_t ist7;
uint64_t unused2;
uint16_t unused3;
uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment64;
/* segment descriptors */
typedef struct x86_segment_descriptor {
uint64_t limit0:16;
uint64_t base0:16;
uint64_t base1:8;
uint64_t type:4;
uint64_t s:1;
uint64_t dpl:2;
uint64_t p:1;
uint64_t limit1:4;
uint64_t avl:1;
uint64_t l:1;
uint64_t db:1;
uint64_t g:1;
uint64_t base2:8;
} __attribute__ ((__packed__)) x86_segment_descriptor;
static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
{
return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
}
static inline void x86_set_segment_base(x86_segment_descriptor *desc,
uint32_t base)
{
desc->base2 = base >> 24;
desc->base1 = (base >> 16) & 0xff;
desc->base0 = base & 0xffff;
}
static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
{
uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
if (desc->g) {
return (limit << 12) | 0xfff;
}
return limit;
}
static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
uint32_t limit)
{
desc->limit0 = limit & 0xffff;
desc->limit1 = limit >> 16;
}
typedef struct x86_call_gate {
uint64_t offset0:16;
uint64_t selector:16;
uint64_t param_count:4;
uint64_t reserved:3;
uint64_t type:4;
uint64_t dpl:1;
uint64_t p:1;
uint64_t offset1:16;
} __attribute__ ((__packed__)) x86_call_gate;
static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
{
return (uint32_t)((gate->offset1 << 16) | gate->offset0);
}
#define LDT_SEL 0
#define GDT_SEL 1
typedef struct x68_segment_selector {
union {
uint16_t sel;
struct {
uint16_t rpl:3;
uint16_t ti:1;
uint16_t index:12;
};
};
} __attribute__ ((__packed__)) x68_segment_selector;
typedef struct lazy_flags {
addr_t result;
addr_t auxbits;
} lazy_flags;
/* Definition of hvf_x86_state is here */
struct HVFX86EmulatorState {
int interruptable;
uint64_t fetch_rip;
uint64_t rip;
struct x86_register regs[16];
struct x86_reg_flags rflags;
struct lazy_flags lflags;
struct x86_efer efer;
uint8_t mmio_buf[4096];
};
/* useful register access macros */
#define RIP(cpu) (cpu->hvf_emul->rip)
#define EIP(cpu) ((uint32_t)cpu->hvf_emul->rip)
#define RFLAGS(cpu) (cpu->hvf_emul->rflags.rflags)
#define EFLAGS(cpu) (cpu->hvf_emul->rflags.eflags)
#define RRX(cpu, reg) (cpu->hvf_emul->regs[reg].rrx)
#define RAX(cpu) RRX(cpu, REG_RAX)
#define RCX(cpu) RRX(cpu, REG_RCX)
#define RDX(cpu) RRX(cpu, REG_RDX)
#define RBX(cpu) RRX(cpu, REG_RBX)
#define RSP(cpu) RRX(cpu, REG_RSP)
#define RBP(cpu) RRX(cpu, REG_RBP)
#define RSI(cpu) RRX(cpu, REG_RSI)
#define RDI(cpu) RRX(cpu, REG_RDI)
#define R8(cpu) RRX(cpu, REG_R8)
#define R9(cpu) RRX(cpu, REG_R9)
#define R10(cpu) RRX(cpu, REG_R10)
#define R11(cpu) RRX(cpu, REG_R11)
#define R12(cpu) RRX(cpu, REG_R12)
#define R13(cpu) RRX(cpu, REG_R13)
#define R14(cpu) RRX(cpu, REG_R14)
#define R15(cpu) RRX(cpu, REG_R15)
#define ERX(cpu, reg) (cpu->hvf_emul->regs[reg].erx)
#define EAX(cpu) ERX(cpu, REG_RAX)
#define ECX(cpu) ERX(cpu, REG_RCX)
#define EDX(cpu) ERX(cpu, REG_RDX)
#define EBX(cpu) ERX(cpu, REG_RBX)
#define ESP(cpu) ERX(cpu, REG_RSP)
#define EBP(cpu) ERX(cpu, REG_RBP)
#define ESI(cpu) ERX(cpu, REG_RSI)
#define EDI(cpu) ERX(cpu, REG_RDI)
#define RX(cpu, reg) (cpu->hvf_emul->regs[reg].rx)
#define AX(cpu) RX(cpu, REG_RAX)
#define CX(cpu) RX(cpu, REG_RCX)
#define DX(cpu) RX(cpu, REG_RDX)
#define BP(cpu) RX(cpu, REG_RBP)
#define SP(cpu) RX(cpu, REG_RSP)
#define BX(cpu) RX(cpu, REG_RBX)
#define SI(cpu) RX(cpu, REG_RSI)
#define DI(cpu) RX(cpu, REG_RDI)
#define RL(cpu, reg) (cpu->hvf_emul->regs[reg].lx)
#define AL(cpu) RL(cpu, REG_RAX)
#define CL(cpu) RL(cpu, REG_RCX)
#define DL(cpu) RL(cpu, REG_RDX)
#define BL(cpu) RL(cpu, REG_RBX)
#define RH(cpu, reg) (cpu->hvf_emul->regs[reg].hx)
#define AH(cpu) RH(cpu, REG_RAX)
#define CH(cpu) RH(cpu, REG_RCX)
#define DH(cpu) RH(cpu, REG_RDX)
#define BH(cpu) RH(cpu, REG_RBX)
/* deal with GDT/LDT descriptors in memory */
bool x86_read_segment_descriptor(struct CPUState *cpu,
struct x86_segment_descriptor *desc,
x68_segment_selector sel);
bool x86_write_segment_descriptor(struct CPUState *cpu,
struct x86_segment_descriptor *desc,
x68_segment_selector sel);
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
int gate);
/* helpers */
bool x86_is_protected(struct CPUState *cpu);
bool x86_is_real(struct CPUState *cpu);
bool x86_is_v8086(struct CPUState *cpu);
bool x86_is_long_mode(struct CPUState *cpu);
bool x86_is_long64_mode(struct CPUState *cpu);
bool x86_is_paging_mode(struct CPUState *cpu);
bool x86_is_pae_enabled(struct CPUState *cpu);
addr_t linear_addr(struct CPUState *cpu, addr_t addr, x86_reg_segment seg);
addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size,
x86_reg_segment seg);
addr_t linear_rip(struct CPUState *cpu, addr_t rip);
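/*
 * Read the host TSC, e.g. for guest TSC-offset computation (see the
 * commented-out VMCS_TSC_OFFSET code in x86hvf.c). Unlike plain rdtsc,
 * rdtscp waits for all earlier instructions to execute before reading
 * the counter.
 */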
static inline uint64_t rdtscp(void)
{
uint64_t tsc;
__asm__ __volatile__("rdtscp; " /* serializing read of tsc */
"shl $32,%%rdx; " /* shift higher 32 bits stored in rdx up */
"or %%rdx,%%rax" /* and or onto rax */
: "=a"(tsc) /* output to tsc variable */
:
: "%rcx", "%rdx"); /* rcx and rdx are clobbered */
return tsc;
}

164
target/i386/hvf/x86_cpuid.c Normal file
View file

@ -0,0 +1,164 @@
/*
* i386 CPUID helper functions
*
* Copyright (c) 2003 Fabrice Bellard
* Copyright (c) 2017 Google Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* cpuid
*/
#include "qemu/osdep.h"
#include "x86.h"
#include "vmx.h"
#include "sysemu/hvf.h"
static uint64_t xgetbv(uint32_t xcr)
{
uint32_t eax, edx;
__asm__ volatile ("xgetbv"
: "=a" (eax), "=d" (edx)
: "c" (xcr));
return (((uint64_t)edx) << 32) | eax;
}
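/* xgetbv(0) returns XCR0, i.e. the host's enabled XSAVE state-component mask. */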
static bool vmx_mpx_supported(void)
{
uint64_t cap_exit, cap_entry;
hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &cap_entry);
hv_vmx_read_capability(HV_VMX_CAP_EXIT, &cap_exit);
return ((cap_exit & (1 << 23)) && (cap_entry & (1 << 16)));
}
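/*
 * MPX is exposed only when the VMCS can context-switch BNDCFGS: VM-entry
 * control bit 16 ("load IA32_BNDCFGS") and VM-exit control bit 23
 * ("clear IA32_BNDCFGS") must both be available, as tested above.
 */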
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
int reg)
{
uint64_t cap;
uint32_t eax, ebx, ecx, edx;
host_cpuid(func, idx, &eax, &ebx, &ecx, &edx);
switch (func) {
case 0:
eax = eax < (uint32_t)0xd ? eax : (uint32_t)0xd;
break;
case 1:
edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX |
CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS;
ecx &= CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 |
CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID |
CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_MOVBE |
CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND;
ecx |= CPUID_EXT_HYPERVISOR;
break;
case 6:
eax = CPUID_6_EAX_ARAT;
ebx = 0;
ecx = 0;
edx = 0;
break;
case 7:
if (idx == 0) {
ebx &= CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 |
CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 |
CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_RTM |
CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA |
CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512PF |
CPUID_7_0_EBX_AVX512ER | CPUID_7_0_EBX_AVX512CD |
CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB |
CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_SHA_NI |
CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL |
CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_MPX;
if (!vmx_mpx_supported()) {
ebx &= ~CPUID_7_0_EBX_MPX;
}
hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
if (!(cap & CPU_BASED2_INVPCID)) {
ebx &= ~CPUID_7_0_EBX_INVPCID;
}
ecx &= CPUID_7_0_ECX_AVX512BMI | CPUID_7_0_ECX_AVX512_VPOPCNTDQ;
edx &= CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS;
} else {
ebx = 0;
ecx = 0;
edx = 0;
}
eax = 0;
break;
case 0xD:
if (idx == 0) {
uint64_t host_xcr0 = xgetbv(0);
uint64_t supp_xcr0 = host_xcr0 & (XSTATE_FP_MASK | XSTATE_SSE_MASK |
XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK |
XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK |
XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK);
eax &= supp_xcr0;
if (!vmx_mpx_supported()) {
eax &= ~(XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK);
}
} else if (idx == 1) {
hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
eax &= CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1;
if (!(cap & CPU_BASED2_XSAVES_XRSTORS)) {
eax &= ~CPUID_XSAVE_XSAVES;
}
}
break;
case 0x80000001:
/* LM only if HVF in 64-bit mode */
edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
CPUID_EXT2_SYSCALL | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
CPUID_PAT | CPUID_PSE36 | CPUID_EXT2_MMXEXT | CPUID_MMX |
CPUID_FXSR | CPUID_EXT2_FXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_3DNOWEXT |
CPUID_EXT2_3DNOW | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX;
hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cap);
if (!(cap & CPU_BASED_TSC_OFFSET)) {
edx &= ~CPUID_EXT2_RDTSCP;
}
ecx &= CPUID_EXT3_LAHF_LM | CPUID_EXT3_CMP_LEG | CPUID_EXT3_CR8LEG |
CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | CPUID_EXT3_MISALIGNSSE |
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_OSVW | CPUID_EXT3_XOP |
CPUID_EXT3_FMA4 | CPUID_EXT3_TBM;
break;
default:
return 0;
}
switch (reg) {
case R_EAX:
return eax;
case R_EBX:
return ebx;
case R_ECX:
return ecx;
case R_EDX:
return edx;
default:
return 0;
}
}

2186
target/i386/hvf/x86_decode.c Normal file

File diff suppressed because it is too large

325
target/i386/hvf/x86_decode.h Normal file
View file

@ -0,0 +1,325 @@
/*
* Copyright (C) 2016 Veertu Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>
#include "qemu-common.h"
#include "x86.h"
#include "cpu.h"
typedef enum x86_prefix {
/* group 1 */
PREFIX_LOCK = 0xf0,
PREFIX_REPN = 0xf2,
PREFIX_REP = 0xf3,
/* group 2 */
PREFIX_CS_SEG_OVEERIDE = 0x2e,
PREFIX_SS_SEG_OVEERIDE = 0x36,
PREFIX_DS_SEG_OVEERIDE = 0x3e,
PREFIX_ES_SEG_OVEERIDE = 0x26,
PREFIX_FS_SEG_OVEERIDE = 0x64,
PREFIX_GS_SEG_OVEERIDE = 0x65,
/* group 3 */
PREFIX_OP_SIZE_OVERRIDE = 0x66,
/* group 4 */
PREFIX_ADDR_SIZE_OVERRIDE = 0x67,
PREFIX_REX = 0x40,
} x86_prefix;
enum x86_decode_cmd {
X86_DECODE_CMD_INVL = 0,
X86_DECODE_CMD_PUSH,
X86_DECODE_CMD_PUSH_SEG,
X86_DECODE_CMD_POP,
X86_DECODE_CMD_POP_SEG,
X86_DECODE_CMD_MOV,
X86_DECODE_CMD_MOVSX,
X86_DECODE_CMD_MOVZX,
X86_DECODE_CMD_CALL_NEAR,
X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
X86_DECODE_CMD_CALL_FAR,
X86_DECODE_RET_NEAR,
X86_DECODE_RET_FAR,
X86_DECODE_CMD_ADD,
X86_DECODE_CMD_OR,
X86_DECODE_CMD_ADC,
X86_DECODE_CMD_SBB,
X86_DECODE_CMD_AND,
X86_DECODE_CMD_SUB,
X86_DECODE_CMD_XOR,
X86_DECODE_CMD_CMP,
X86_DECODE_CMD_INC,
X86_DECODE_CMD_DEC,
X86_DECODE_CMD_TST,
X86_DECODE_CMD_NOT,
X86_DECODE_CMD_NEG,
X86_DECODE_CMD_JMP_NEAR,
X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
X86_DECODE_CMD_JMP_FAR,
X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
X86_DECODE_CMD_LEA,
X86_DECODE_CMD_JXX,
X86_DECODE_CMD_JCXZ,
X86_DECODE_CMD_SETXX,
X86_DECODE_CMD_MOV_TO_SEG,
X86_DECODE_CMD_MOV_FROM_SEG,
X86_DECODE_CMD_CLI,
X86_DECODE_CMD_STI,
X86_DECODE_CMD_CLD,
X86_DECODE_CMD_STD,
X86_DECODE_CMD_STC,
X86_DECODE_CMD_CLC,
X86_DECODE_CMD_OUT,
X86_DECODE_CMD_IN,
X86_DECODE_CMD_INS,
X86_DECODE_CMD_OUTS,
X86_DECODE_CMD_LIDT,
X86_DECODE_CMD_SIDT,
X86_DECODE_CMD_LGDT,
X86_DECODE_CMD_SGDT,
X86_DECODE_CMD_SMSW,
X86_DECODE_CMD_LMSW,
X86_DECODE_CMD_RDTSCP,
X86_DECODE_CMD_INVLPG,
X86_DECODE_CMD_MOV_TO_CR,
X86_DECODE_CMD_MOV_FROM_CR,
X86_DECODE_CMD_MOV_TO_DR,
X86_DECODE_CMD_MOV_FROM_DR,
X86_DECODE_CMD_PUSHF,
X86_DECODE_CMD_POPF,
X86_DECODE_CMD_CPUID,
X86_DECODE_CMD_ROL,
X86_DECODE_CMD_ROR,
X86_DECODE_CMD_RCL,
X86_DECODE_CMD_RCR,
X86_DECODE_CMD_SHL,
X86_DECODE_CMD_SAL,
X86_DECODE_CMD_SHR,
X86_DECODE_CMD_SHRD,
X86_DECODE_CMD_SHLD,
X86_DECODE_CMD_SAR,
X86_DECODE_CMD_DIV,
X86_DECODE_CMD_IDIV,
X86_DECODE_CMD_MUL,
X86_DECODE_CMD_IMUL_3,
X86_DECODE_CMD_IMUL_2,
X86_DECODE_CMD_IMUL_1,
X86_DECODE_CMD_MOVS,
X86_DECODE_CMD_CMPS,
X86_DECODE_CMD_SCAS,
X86_DECODE_CMD_LODS,
X86_DECODE_CMD_STOS,
X86_DECODE_CMD_BSWAP,
X86_DECODE_CMD_XCHG,
X86_DECODE_CMD_RDTSC,
X86_DECODE_CMD_RDMSR,
X86_DECODE_CMD_WRMSR,
X86_DECODE_CMD_ENTER,
X86_DECODE_CMD_LEAVE,
X86_DECODE_CMD_BT,
X86_DECODE_CMD_BTS,
X86_DECODE_CMD_BTC,
X86_DECODE_CMD_BTR,
X86_DECODE_CMD_BSF,
X86_DECODE_CMD_BSR,
X86_DECODE_CMD_IRET,
X86_DECODE_CMD_INT,
X86_DECODE_CMD_POPA,
X86_DECODE_CMD_PUSHA,
X86_DECODE_CMD_CWD,
X86_DECODE_CMD_CBW,
X86_DECODE_CMD_DAS,
X86_DECODE_CMD_AAD,
X86_DECODE_CMD_AAM,
X86_DECODE_CMD_AAS,
X86_DECODE_CMD_LOOP,
X86_DECODE_CMD_SLDT,
X86_DECODE_CMD_STR,
X86_DECODE_CMD_LLDT,
X86_DECODE_CMD_LTR,
X86_DECODE_CMD_VERR,
X86_DECODE_CMD_VERW,
X86_DECODE_CMD_SAHF,
X86_DECODE_CMD_LAHF,
X86_DECODE_CMD_WBINVD,
X86_DECODE_CMD_LDS,
X86_DECODE_CMD_LSS,
X86_DECODE_CMD_LES,
X86_DECODE_XMD_LGS,
X86_DECODE_CMD_LFS,
X86_DECODE_CMD_CMC,
X86_DECODE_CMD_XLAT,
X86_DECODE_CMD_NOP,
X86_DECODE_CMD_CMOV,
X86_DECODE_CMD_CLTS,
X86_DECODE_CMD_XADD,
X86_DECODE_CMD_HLT,
X86_DECODE_CMD_CMPXCHG8B,
X86_DECODE_CMD_CMPXCHG,
X86_DECODE_CMD_POPCNT,
X86_DECODE_CMD_FNINIT,
X86_DECODE_CMD_FLD,
X86_DECODE_CMD_FLDxx,
X86_DECODE_CMD_FNSTCW,
X86_DECODE_CMD_FNSTSW,
X86_DECODE_CMD_FNSETPM,
X86_DECODE_CMD_FSAVE,
X86_DECODE_CMD_FRSTOR,
X86_DECODE_CMD_FXSAVE,
X86_DECODE_CMD_FXRSTOR,
X86_DECODE_CMD_FDIV,
X86_DECODE_CMD_FMUL,
X86_DECODE_CMD_FSUB,
X86_DECODE_CMD_FADD,
X86_DECODE_CMD_EMMS,
X86_DECODE_CMD_MFENCE,
X86_DECODE_CMD_SFENCE,
X86_DECODE_CMD_LFENCE,
X86_DECODE_CMD_PREFETCH,
X86_DECODE_CMD_CLFLUSH,
X86_DECODE_CMD_FST,
X86_DECODE_CMD_FABS,
X86_DECODE_CMD_FUCOM,
X86_DECODE_CMD_FUCOMI,
X86_DECODE_CMD_FLDCW,
X86_DECODE_CMD_FXCH,
X86_DECODE_CMD_FCHS,
X86_DECODE_CMD_FCMOV,
X86_DECODE_CMD_FRNDINT,
X86_DECODE_CMD_FXAM,
X86_DECODE_CMD_LAST,
};
const char *decode_cmd_to_string(enum x86_decode_cmd cmd);
typedef struct x86_modrm {
union {
uint8_t modrm;
struct {
uint8_t rm:3;
uint8_t reg:3;
uint8_t mod:2;
};
};
} __attribute__ ((__packed__)) x86_modrm;
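/* e.g. modrm == 0xd8 decodes as mod = 3, reg = 3, rm = 0 (register-direct). */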
typedef struct x86_sib {
union {
uint8_t sib;
struct {
uint8_t base:3;
uint8_t index:3;
uint8_t scale:2;
};
};
} __attribute__ ((__packed__)) x86_sib;
typedef struct x86_rex {
union {
uint8_t rex;
struct {
uint8_t b:1;
uint8_t x:1;
uint8_t r:1;
uint8_t w:1;
uint8_t unused:4;
};
};
} __attribute__ ((__packed__)) x86_rex;
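/*
 * REX occupies opcodes 0x40-0x4f: w selects 64-bit operand size, while
 * r, x and b extend the modrm reg, SIB index and rm/base fields.
 */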
typedef enum x86_var_type {
X86_VAR_IMMEDIATE,
X86_VAR_OFFSET,
X86_VAR_REG,
X86_VAR_RM,
/* for floating point computations */
X87_VAR_REG,
X87_VAR_FLOATP,
X87_VAR_INTP,
X87_VAR_BYTEP,
} x86_var_type;
typedef struct x86_decode_op {
enum x86_var_type type;
int size;
int reg;
addr_t val;
addr_t ptr;
} x86_decode_op;
typedef struct x86_decode {
int len;
uint8_t opcode[4];
uint8_t opcode_len;
enum x86_decode_cmd cmd;
int addressing_size;
int operand_size;
int lock;
int rep;
int op_size_override;
int addr_size_override;
int segment_override;
int control_change_inst;
bool fwait;
bool fpop_stack;
bool frev;
uint32_t displacement;
uint8_t displacement_size;
struct x86_rex rex;
bool is_modrm;
bool sib_present;
struct x86_sib sib;
struct x86_modrm modrm;
struct x86_decode_op op[4];
bool is_fpu;
addr_t flags_mask;
} x86_decode;
uint64_t sign(uint64_t val, int size);
uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
addr_t get_reg_ref(CPUX86State *env, int reg, int is_extended, int size);
addr_t get_reg_val(CPUX86State *env, int reg, int is_extended, int size);
void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
struct x86_decode_op *op);
addr_t decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
addr_t addr, x86_reg_segment seg);
void init_decoder(void);
void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
struct x86_decode_op *op);
void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
struct x86_decode_op *op);
void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
struct x86_decode_op *op);
void set_addressing_size(CPUX86State *env, struct x86_decode *decode);
void set_operand_size(CPUX86State *env, struct x86_decode *decode);

124
target/i386/hvf/x86_descr.c Normal file
View file

@ -0,0 +1,124 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "vmx.h"
#include "x86_descr.h"
#define VMX_SEGMENT_FIELD(seg) \
[REG_SEG_##seg] = { \
.selector = VMCS_GUEST_##seg##_SELECTOR, \
.base = VMCS_GUEST_##seg##_BASE, \
.limit = VMCS_GUEST_##seg##_LIMIT, \
.ar_bytes = VMCS_GUEST_##seg##_ACCESS_RIGHTS, \
}
static const struct vmx_segment_field {
int selector;
int base;
int limit;
int ar_bytes;
} vmx_segment_fields[] = {
VMX_SEGMENT_FIELD(ES),
VMX_SEGMENT_FIELD(CS),
VMX_SEGMENT_FIELD(SS),
VMX_SEGMENT_FIELD(DS),
VMX_SEGMENT_FIELD(FS),
VMX_SEGMENT_FIELD(GS),
VMX_SEGMENT_FIELD(LDTR),
VMX_SEGMENT_FIELD(TR),
};
uint32_t vmx_read_segment_limit(CPUState *cpu, x86_reg_segment seg)
{
return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
}
uint32_t vmx_read_segment_ar(CPUState *cpu, x86_reg_segment seg)
{
return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
}
uint64_t vmx_read_segment_base(CPUState *cpu, x86_reg_segment seg)
{
return rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
}
x68_segment_selector vmx_read_segment_selector(CPUState *cpu, x86_reg_segment seg)
{
x68_segment_selector sel;
sel.sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
return sel;
}
void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, x86_reg_segment seg)
{
wvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector, selector.sel);
}
void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, x86_reg_segment seg)
{
desc->sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
desc->base = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
desc->limit = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
desc->ar = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
}
void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, x86_reg_segment seg)
{
const struct vmx_segment_field *sf = &vmx_segment_fields[seg];
wvmcs(cpu->hvf_fd, sf->base, desc->base);
wvmcs(cpu->hvf_fd, sf->limit, desc->limit);
wvmcs(cpu->hvf_fd, sf->selector, desc->sel);
wvmcs(cpu->hvf_fd, sf->ar_bytes, desc->ar);
}
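/*
 * Pack a memory-format descriptor into the VMX guest-segment access-rights
 * layout; a null selector sets bit 16 ("segment unusable").
 */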
void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc)
{
vmx_desc->sel = selector.sel;
vmx_desc->base = x86_segment_base(desc);
vmx_desc->limit = x86_segment_limit(desc);
vmx_desc->ar = (selector.sel ? 0 : 1) << 16 |
desc->g << 15 |
desc->db << 14 |
desc->l << 13 |
desc->avl << 12 |
desc->p << 7 |
desc->dpl << 5 |
desc->s << 4 |
desc->type;
}
void vmx_segment_to_x86_descriptor(struct CPUState *cpu, struct vmx_segment *vmx_desc, struct x86_segment_descriptor *desc)
{
x86_set_segment_limit(desc, vmx_desc->limit);
x86_set_segment_base(desc, vmx_desc->base);
desc->type = vmx_desc->ar & 15;
desc->s = (vmx_desc->ar >> 4) & 1;
desc->dpl = (vmx_desc->ar >> 5) & 3;
desc->p = (vmx_desc->ar >> 7) & 1;
desc->avl = (vmx_desc->ar >> 12) & 1;
desc->l = (vmx_desc->ar >> 13) & 1;
desc->db = (vmx_desc->ar >> 14) & 1;
desc->g = (vmx_desc->ar >> 15) & 1;
}

55
target/i386/hvf/x86_descr.h Normal file
View file

@ -0,0 +1,55 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "x86.h"
typedef struct vmx_segment {
uint16_t sel;
uint64_t base;
uint64_t limit;
uint64_t ar;
} vmx_segment;
/* deal with vmstate descriptors */
void vmx_read_segment_descriptor(struct CPUState *cpu,
struct vmx_segment *desc, x86_reg_segment seg);
void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,
x86_reg_segment seg);
x68_segment_selector vmx_read_segment_selector(struct CPUState *cpu,
x86_reg_segment seg);
void vmx_write_segment_selector(struct CPUState *cpu,
x68_segment_selector selector,
x86_reg_segment seg);
uint64_t vmx_read_segment_base(struct CPUState *cpu, x86_reg_segment seg);
void vmx_write_segment_base(struct CPUState *cpu, x86_reg_segment seg,
uint64_t base);
void x86_segment_descriptor_to_vmx(struct CPUState *cpu,
x68_segment_selector selector,
struct x86_segment_descriptor *desc,
struct vmx_segment *vmx_desc);
uint32_t vmx_read_segment_limit(CPUState *cpu, x86_reg_segment seg);
uint32_t vmx_read_segment_ar(CPUState *cpu, x86_reg_segment seg);
void vmx_segment_to_x86_descriptor(struct CPUState *cpu,
struct vmx_segment *vmx_desc,
struct x86_segment_descriptor *desc);

1537
target/i386/hvf/x86_emu.c Normal file

File diff suppressed because it is too large

49
target/i386/hvf/x86_emu.h Normal file
View file

@ -0,0 +1,49 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __X86_EMU_H__
#define __X86_EMU_H__
#include "x86.h"
#include "x86_decode.h"
#include "cpu.h"
void init_emu(void);
bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins);
void load_regs(struct CPUState *cpu);
void store_regs(struct CPUState *cpu);
void simulate_rdmsr(struct CPUState *cpu);
void simulate_wrmsr(struct CPUState *cpu);
addr_t read_reg(CPUX86State *env, int reg, int size);
void write_reg(CPUX86State *env, int reg, addr_t val, int size);
addr_t read_val_from_reg(addr_t reg_ptr, int size);
void write_val_to_reg(addr_t reg_ptr, addr_t val, int size);
void write_val_ext(struct CPUX86State *env, addr_t ptr, addr_t val, int size);
uint8_t *read_mmio(struct CPUX86State *env, addr_t ptr, int bytes);
addr_t read_val_ext(struct CPUX86State *env, addr_t ptr, int size);
void exec_movzx(struct CPUX86State *env, struct x86_decode *decode);
void exec_shl(struct CPUX86State *env, struct x86_decode *decode);
void exec_movsx(struct CPUX86State *env, struct x86_decode *decode);
void exec_ror(struct CPUX86State *env, struct x86_decode *decode);
void exec_rol(struct CPUX86State *env, struct x86_decode *decode);
void exec_rcl(struct CPUX86State *env, struct x86_decode *decode);
void exec_rcr(struct CPUX86State *env, struct x86_decode *decode);
#endif

333
target/i386/hvf/x86_flags.c Normal file
View file

@ -0,0 +1,333 @@
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2012 The Bochs Project
// Copyright (C) 2017 Google Inc.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////
/*
* flags functions
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "x86_flags.h"
#include "x86.h"
void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf)
{
uint32_t temp_po = new_of ^ new_cf;
env->hvf_emul->lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);
env->hvf_emul->lflags.auxbits |= (temp_po << LF_BIT_PO) |
(new_cf << LF_BIT_CF);
}
void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff)
{
SET_FLAGS_OSZAPC_SUB_32(v1, v2, diff);
}
void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
uint16_t diff)
{
SET_FLAGS_OSZAPC_SUB_16(v1, v2, diff);
}
void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
uint8_t diff)
{
SET_FLAGS_OSZAPC_SUB_8(v1, v2, diff);
}
void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff)
{
SET_FLAGS_OSZAPC_ADD_32(v1, v2, diff);
}
void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
uint16_t diff)
{
SET_FLAGS_OSZAPC_ADD_16(v1, v2, diff);
}
void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
uint8_t diff)
{
SET_FLAGS_OSZAPC_ADD_8(v1, v2, diff);
}
void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff)
{
SET_FLAGS_OSZAP_SUB_32(v1, v2, diff);
}
void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
uint16_t diff)
{
SET_FLAGS_OSZAP_SUB_16(v1, v2, diff);
}
void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
uint8_t diff)
{
SET_FLAGS_OSZAP_SUB_8(v1, v2, diff);
}
void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff)
{
SET_FLAGS_OSZAP_ADD_32(v1, v2, diff);
}
void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
uint16_t diff)
{
SET_FLAGS_OSZAP_ADD_16(v1, v2, diff);
}
void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
uint8_t diff)
{
SET_FLAGS_OSZAP_ADD_8(v1, v2, diff);
}
void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t diff)
{
SET_FLAGS_OSZAPC_LOGIC_32(diff);
}
void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t diff)
{
SET_FLAGS_OSZAPC_LOGIC_16(diff);
}
void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t diff)
{
SET_FLAGS_OSZAPC_LOGIC_8(diff);
}
void SET_FLAGS_SHR32(CPUX86State *env, uint32_t v, int count, uint32_t res)
{
int cf = (v >> (count - 1)) & 0x1;
int of = (((res << 1) ^ res) >> 31);
SET_FLAGS_OSZAPC_LOGIC_32(res);
SET_FLAGS_OxxxxC(env, of, cf);
}
void SET_FLAGS_SHR16(CPUX86State *env, uint16_t v, int count, uint16_t res)
{
int cf = (v >> (count - 1)) & 0x1;
int of = (((res << 1) ^ res) >> 15);
SET_FLAGS_OSZAPC_LOGIC_16(res);
SET_FLAGS_OxxxxC(env, of, cf);
}
void SET_FLAGS_SHR8(CPUX86State *env, uint8_t v, int count, uint8_t res)
{
int cf = (v >> (count - 1)) & 0x1;
int of = (((res << 1) ^ res) >> 7);
SET_FLAGS_OSZAPC_LOGIC_8(res);
SET_FLAGS_OxxxxC(env, of, cf);
}
void SET_FLAGS_SAR32(CPUX86State *env, int32_t v, int count, uint32_t res)
{
int cf = (v >> (count - 1)) & 0x1;
SET_FLAGS_OSZAPC_LOGIC_32(res);
SET_FLAGS_OxxxxC(env, 0, cf);
}
void SET_FLAGS_SAR16(CPUX86State *env, int16_t v, int count, uint16_t res)
{
int cf = (v >> (count - 1)) & 0x1;
SET_FLAGS_OSZAPC_LOGIC_16(res);
SET_FLAGS_OxxxxC(env, 0, cf);
}
void SET_FLAGS_SAR8(CPUX86State *env, int8_t v, int count, uint8_t res)
{
int cf = (v >> (count - 1)) & 0x1;
SET_FLAGS_OSZAPC_LOGIC_8(res);
SET_FLAGS_OxxxxC(env, 0, cf);
}
void SET_FLAGS_SHL32(CPUX86State *env, uint32_t v, int count, uint32_t res)
{
int of, cf;
cf = (v >> (32 - count)) & 0x1;
of = cf ^ (res >> 31);
SET_FLAGS_OSZAPC_LOGIC_32(res);
SET_FLAGS_OxxxxC(env, of, cf);
}
void SET_FLAGS_SHL16(CPUX86State *env, uint16_t v, int count, uint16_t res)
{
int of = 0, cf = 0;
if (count <= 16) {
cf = (v >> (16 - count)) & 0x1;
of = cf ^ (res >> 15);
}
SET_FLAGS_OSZAPC_LOGIC_16(res);
SET_FLAGS_OxxxxC(env, of, cf);
}
void SET_FLAGS_SHL8(CPUX86State *env, uint8_t v, int count, uint8_t res)
{
int of = 0, cf = 0;
if (count <= 8) {
cf = (v >> (8 - count)) & 0x1;
of = cf ^ (res >> 7);
}
SET_FLAGS_OSZAPC_LOGIC_8(res);
SET_FLAGS_OxxxxC(env, of, cf);
}
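/*
 * PF is computed lazily: the low byte of .result is XORed with the parity
 * delta byte, folded to a nibble, and looked up in the bitmap 0x9669
 * (bit i set iff the 4-bit value i has an even number of 1s).
 */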
bool get_PF(CPUX86State *env)
{
uint32_t temp = (255 & env->hvf_emul->lflags.result);
temp = temp ^ (255 & (env->hvf_emul->lflags.auxbits >> LF_BIT_PDB));
temp = (temp ^ (temp >> 4)) & 0x0F;
return (0x9669U >> temp) & 1;
}
void set_PF(CPUX86State *env, bool val)
{
uint32_t temp = (255 & env->hvf_emul->lflags.result) ^ (!val);
env->hvf_emul->lflags.auxbits &= ~(LF_MASK_PDB);
env->hvf_emul->lflags.auxbits |= (temp << LF_BIT_PDB);
}
bool _get_OF(CPUX86State *env)
{
return ((env->hvf_emul->lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;
}
bool get_OF(CPUX86State *env)
{
return _get_OF(env);
}
bool _get_CF(CPUX86State *env)
{
return (env->hvf_emul->lflags.auxbits >> LF_BIT_CF) & 1;
}
bool get_CF(CPUX86State *env)
{
return _get_CF(env);
}
void set_OF(CPUX86State *env, bool val)
{
SET_FLAGS_OxxxxC(env, val, _get_CF(env));
}
void set_CF(CPUX86State *env, bool val)
{
SET_FLAGS_OxxxxC(env, _get_OF(env), (val));
}
bool get_AF(CPUX86State *env)
{
return (env->hvf_emul->lflags.auxbits >> LF_BIT_AF) & 1;
}
void set_AF(CPUX86State *env, bool val)
{
env->hvf_emul->lflags.auxbits &= ~(LF_MASK_AF);
env->hvf_emul->lflags.auxbits |= (val) << LF_BIT_AF;
}
bool get_ZF(CPUX86State *env)
{
return !env->hvf_emul->lflags.result;
}
void set_ZF(CPUX86State *env, bool val)
{
if (val) {
env->hvf_emul->lflags.auxbits ^=
(((env->hvf_emul->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);
/* merge the parity bits into the Parity Delta Byte */
uint32_t temp_pdb = (255 & env->hvf_emul->lflags.result);
env->hvf_emul->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);
/* now zero the .result value */
env->hvf_emul->lflags.result = 0;
} else {
env->hvf_emul->lflags.result |= (1 << 8);
}
}
bool get_SF(CPUX86State *env)
{
return ((env->hvf_emul->lflags.result >> LF_SIGN_BIT) ^
(env->hvf_emul->lflags.auxbits >> LF_BIT_SD)) & 1;
}
void set_SF(CPUX86State *env, bool val)
{
bool temp_sf = get_SF(env);
env->hvf_emul->lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;
}
void set_OSZAPC(CPUX86State *env, uint32_t flags32)
{
set_OF(env, env->hvf_emul->rflags.of);
set_SF(env, env->hvf_emul->rflags.sf);
set_ZF(env, env->hvf_emul->rflags.zf);
set_AF(env, env->hvf_emul->rflags.af);
set_PF(env, env->hvf_emul->rflags.pf);
set_CF(env, env->hvf_emul->rflags.cf);
}
void lflags_to_rflags(CPUX86State *env)
{
env->hvf_emul->rflags.cf = get_CF(env);
env->hvf_emul->rflags.pf = get_PF(env);
env->hvf_emul->rflags.af = get_AF(env);
env->hvf_emul->rflags.zf = get_ZF(env);
env->hvf_emul->rflags.sf = get_SF(env);
env->hvf_emul->rflags.of = get_OF(env);
}
void rflags_to_lflags(CPUX86State *env)
{
env->hvf_emul->lflags.auxbits = env->hvf_emul->lflags.result = 0;
set_OF(env, env->hvf_emul->rflags.of);
set_SF(env, env->hvf_emul->rflags.sf);
set_ZF(env, env->hvf_emul->rflags.zf);
set_AF(env, env->hvf_emul->rflags.af);
set_PF(env, env->hvf_emul->rflags.pf);
set_CF(env, env->hvf_emul->rflags.cf);
}

243
target/i386/hvf/x86_flags.h Normal file
View file

@ -0,0 +1,243 @@
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2012 The Bochs Project
// Copyright (C) 2017 Google Inc.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////
/*
* x86 eflags functions
*/
#ifndef __X86_FLAGS_H__
#define __X86_FLAGS_H__
#include "x86_gen.h"
#include "cpu.h"
/* this is basically Bochs code */
#define LF_SIGN_BIT 31
#define LF_BIT_SD (0) /* lazy Sign Flag Delta */
#define LF_BIT_AF (3) /* lazy Adjust flag */
#define LF_BIT_PDB (8) /* lazy Parity Delta Byte (8 bits) */
#define LF_BIT_CF (31) /* lazy Carry Flag */
#define LF_BIT_PO (30) /* lazy Partial Overflow = CF ^ OF */
#define LF_MASK_SD (0x01 << LF_BIT_SD)
#define LF_MASK_AF (0x01 << LF_BIT_AF)
#define LF_MASK_PDB (0xFF << LF_BIT_PDB)
#define LF_MASK_CF (0x01 << LF_BIT_CF)
#define LF_MASK_PO (0x01 << LF_BIT_PO)
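/*
 * Lazy-flags layout: .result caches the last ALU result (ZF/SF/PF are
 * derived from it on demand), while .auxbits packs CF (bit 31),
 * PO = CF ^ OF (bit 30), the parity delta byte (bits 8-15), AF (bit 3)
 * and the sign delta (bit 0).
 */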
#define ADD_COUT_VEC(op1, op2, result) \
(((op1) & (op2)) | (((op1) | (op2)) & (~(result))))
#define SUB_COUT_VEC(op1, op2, result) \
(((~(op1)) & (op2)) | (((~(op1)) ^ (op2)) & (result)))
#define GET_ADD_OVERFLOW(op1, op2, result, mask) \
((((op1) ^ (result)) & ((op2) ^ (result))) & (mask))
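/*
 * The COUT vectors hold the per-bit carry-outs of the operation: bit
 * size-1 is CF and bit size-2 is the carry into the MSB. Since
 * OF = CF ^ carry-in(MSB), bit size-2 equals PO = CF ^ OF, which is why
 * SET_FLAGS_OSZAPC_SIZE below shifts the vector right by (size - 2) and
 * left by LF_BIT_PO, depositing PO at bit 30 and CF at bit 31 in one move.
 */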
/* ******************* */
/* OSZAPC */
/* ******************* */
/* size, carries, result */
#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \
addr_t temp = ((lf_carries) & (LF_MASK_AF)) | \
(((lf_carries) >> (size - 2)) << LF_BIT_PO); \
env->hvf_emul->lflags.result = (addr_t)(int##size##_t)(lf_result); \
if ((size) == 32) { \
temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
} else if ((size) == 16) { \
temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
} else if ((size) == 8) { \
temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
} else { \
VM_PANIC("unimplemented"); \
} \
env->hvf_emul->lflags.auxbits = (addr_t)(uint32_t)temp; \
}
/* carries, result */
#define SET_FLAGS_OSZAPC_8(carries, result) \
SET_FLAGS_OSZAPC_SIZE(8, carries, result)
#define SET_FLAGS_OSZAPC_16(carries, result) \
SET_FLAGS_OSZAPC_SIZE(16, carries, result)
#define SET_FLAGS_OSZAPC_32(carries, result) \
SET_FLAGS_OSZAPC_SIZE(32, carries, result)
/* result */
#define SET_FLAGS_OSZAPC_LOGIC_8(result_8) \
SET_FLAGS_OSZAPC_8(0, (result_8))
#define SET_FLAGS_OSZAPC_LOGIC_16(result_16) \
SET_FLAGS_OSZAPC_16(0, (result_16))
#define SET_FLAGS_OSZAPC_LOGIC_32(result_32) \
SET_FLAGS_OSZAPC_32(0, (result_32))
#define SET_FLAGS_OSZAPC_LOGIC_SIZE(size, result) { \
if (32 == size) { \
SET_FLAGS_OSZAPC_LOGIC_32(result); \
} else if (16 == size) { \
SET_FLAGS_OSZAPC_LOGIC_16(result); \
} else if (8 == size) { \
SET_FLAGS_OSZAPC_LOGIC_8(result); \
} else { \
VM_PANIC("unimplemented"); \
} \
}
/* op1, op2, result */
#define SET_FLAGS_OSZAPC_ADD_8(op1_8, op2_8, sum_8) \
SET_FLAGS_OSZAPC_8(ADD_COUT_VEC((op1_8), (op2_8), (sum_8)), (sum_8))
#define SET_FLAGS_OSZAPC_ADD_16(op1_16, op2_16, sum_16) \
SET_FLAGS_OSZAPC_16(ADD_COUT_VEC((op1_16), (op2_16), (sum_16)), (sum_16))
#define SET_FLAGS_OSZAPC_ADD_32(op1_32, op2_32, sum_32) \
SET_FLAGS_OSZAPC_32(ADD_COUT_VEC((op1_32), (op2_32), (sum_32)), (sum_32))
/* op1, op2, result */
#define SET_FLAGS_OSZAPC_SUB_8(op1_8, op2_8, diff_8) \
SET_FLAGS_OSZAPC_8(SUB_COUT_VEC((op1_8), (op2_8), (diff_8)), (diff_8))
#define SET_FLAGS_OSZAPC_SUB_16(op1_16, op2_16, diff_16) \
SET_FLAGS_OSZAPC_16(SUB_COUT_VEC((op1_16), (op2_16), (diff_16)), (diff_16))
#define SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32) \
SET_FLAGS_OSZAPC_32(SUB_COUT_VEC((op1_32), (op2_32), (diff_32)), (diff_32))
/* ******************* */
/* OSZAP */
/* ******************* */
/* size, carries, result */
#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \
addr_t temp = ((lf_carries) & (LF_MASK_AF)) | \
(((lf_carries) >> (size - 2)) << LF_BIT_PO); \
if ((size) == 32) { \
temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
} else if ((size) == 16) { \
temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
} else if ((size) == 8) { \
temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
} else { \
VM_PANIC("unimplemented"); \
} \
env->hvf_emul->lflags.result = (addr_t)(int##size##_t)(lf_result); \
addr_t delta_c = (env->hvf_emul->lflags.auxbits ^ temp) & LF_MASK_CF; \
delta_c ^= (delta_c >> 1); \
env->hvf_emul->lflags.auxbits = (addr_t)(uint32_t)(temp ^ delta_c); \
}
/* carries, result */
#define SET_FLAGS_OSZAP_8(carries, result) \
SET_FLAGS_OSZAP_SIZE(8, carries, result)
#define SET_FLAGS_OSZAP_16(carries, result) \
SET_FLAGS_OSZAP_SIZE(16, carries, result)
#define SET_FLAGS_OSZAP_32(carries, result) \
SET_FLAGS_OSZAP_SIZE(32, carries, result)
/* op1, op2, result */
#define SET_FLAGS_OSZAP_ADD_8(op1_8, op2_8, sum_8) \
SET_FLAGS_OSZAP_8(ADD_COUT_VEC((op1_8), (op2_8), (sum_8)), (sum_8))
#define SET_FLAGS_OSZAP_ADD_16(op1_16, op2_16, sum_16) \
SET_FLAGS_OSZAP_16(ADD_COUT_VEC((op1_16), (op2_16), (sum_16)), (sum_16))
#define SET_FLAGS_OSZAP_ADD_32(op1_32, op2_32, sum_32) \
SET_FLAGS_OSZAP_32(ADD_COUT_VEC((op1_32), (op2_32), (sum_32)), (sum_32))
/* op1, op2, result */
#define SET_FLAGS_OSZAP_SUB_8(op1_8, op2_8, diff_8) \
SET_FLAGS_OSZAP_8(SUB_COUT_VEC((op1_8), (op2_8), (diff_8)), (diff_8))
#define SET_FLAGS_OSZAP_SUB_16(op1_16, op2_16, diff_16) \
SET_FLAGS_OSZAP_16(SUB_COUT_VEC((op1_16), (op2_16), (diff_16)), (diff_16))
#define SET_FLAGS_OSZAP_SUB_32(op1_32, op2_32, diff_32) \
SET_FLAGS_OSZAP_32(SUB_COUT_VEC((op1_32), (op2_32), (diff_32)), (diff_32))
/* ******************* */
/* OSZAxC */
/* ******************* */
/* size, carries, result */
#define SET_FLAGS_OSZAxC_LOGIC_SIZE(size, lf_result) { \
    bool saved_PF = get_PF(env); \
    SET_FLAGS_OSZAPC_SIZE(size, (int##size##_t)(0), lf_result); \
    set_PF(env, saved_PF); \
}
/* result */
#define SET_FLAGS_OSZAxC_LOGIC_32(result_32) \
SET_FLAGS_OSZAxC_LOGIC_SIZE(32, (result_32))
void lflags_to_rflags(CPUX86State *env);
void rflags_to_lflags(CPUX86State *env);
bool get_PF(CPUX86State *env);
void set_PF(CPUX86State *env, bool val);
bool get_CF(CPUX86State *env);
void set_CF(CPUX86State *env, bool val);
bool get_AF(CPUX86State *env);
void set_AF(CPUX86State *env, bool val);
bool get_ZF(CPUX86State *env);
void set_ZF(CPUX86State *env, bool val);
bool get_SF(CPUX86State *env);
void set_SF(CPUX86State *env, bool val);
bool get_OF(CPUX86State *env);
void set_OF(CPUX86State *env, bool val);
void set_OSZAPC(CPUX86State *env, uint32_t flags32);
void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf);
void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff);
void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
uint16_t diff);
void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
uint8_t diff);
void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff);
void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
uint16_t diff);
void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
uint8_t diff);
void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff);
void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
uint16_t diff);
void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
uint8_t diff);
void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff);
void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
uint16_t diff);
void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
uint8_t diff);
void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t diff);
void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t diff);
void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t diff);
void SET_FLAGS_SHR32(CPUX86State *env, uint32_t v, int count, uint32_t res);
void SET_FLAGS_SHR16(CPUX86State *env, uint16_t v, int count, uint16_t res);
void SET_FLAGS_SHR8(CPUX86State *env, uint8_t v, int count, uint8_t res);
void SET_FLAGS_SAR32(CPUX86State *env, int32_t v, int count, uint32_t res);
void SET_FLAGS_SAR16(CPUX86State *env, int16_t v, int count, uint16_t res);
void SET_FLAGS_SAR8(CPUX86State *env, int8_t v, int count, uint8_t res);
void SET_FLAGS_SHL32(CPUX86State *env, uint32_t v, int count, uint32_t res);
void SET_FLAGS_SHL16(CPUX86State *env, uint16_t v, int count, uint16_t res);
void SET_FLAGS_SHL8(CPUX86State *env, uint8_t v, int count, uint8_t res);
bool _get_OF(CPUX86State *env);
bool _get_CF(CPUX86State *env);
#endif /* __X86_FLAGS_H__ */

53
target/i386/hvf/x86_gen.h Normal file
View file

@ -0,0 +1,53 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __X86_GEN_H__
#define __X86_GEN_H__
#include <stdlib.h>
#include <stdio.h>
#include "qemu-common.h"
typedef uint64_t addr_t;
#define VM_PANIC(x) {\
printf("%s\n", x); \
abort(); \
}
#define VM_PANIC_ON(x) {\
if (x) { \
printf("%s\n", #x); \
abort(); \
} \
}
#define VM_PANIC_EX(...) {\
printf(__VA_ARGS__); \
abort(); \
}
#define VM_PANIC_ON_EX(x, ...) {\
if (x) { \
printf(__VA_ARGS__); \
abort(); \
} \
}
#define ZERO_INIT(obj) memset((void *) &obj, 0, sizeof(obj))
#endif

273
target/i386/hvf/x86_mmu.c Normal file
View file

@ -0,0 +1,273 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "x86.h"
#include "x86_mmu.h"
#include "string.h"
#include "vmcs.h"
#include "vmx.h"
#include "memory.h"
#include "exec/address-spaces.h"
#define pte_present(pte) (pte & PT_PRESENT)
#define pte_write_access(pte) (pte & PT_WRITE)
#define pte_user_access(pte) (pte & PT_USER)
#define pte_exec_access(pte) (!(pte & PT_NX))
#define pte_large_page(pte) (pte & PT_PS)
#define pte_global_access(pte) (pte & PT_GLOBAL)
#define PAE_CR3_MASK (~0x1fllu)
#define LEGACY_CR3_MASK (0xffffffff)
#define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12)
#define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1))
#define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1))
struct gpt_translation {
addr_t gva;
addr_t gpa;
int err_code;
uint64_t pte[5];
bool write_access;
bool user_access;
bool exec_access;
};
static int gpt_top_level(struct CPUState *cpu, bool pae)
{
if (!pae) {
return 2;
}
if (x86_is_long_mode(cpu)) {
return 4;
}
return 3;
}
static inline int gpt_entry(addr_t addr, int level, bool pae)
{
int level_shift = pae ? 9 : 10;
return (addr >> (level_shift * (level - 1) + 12)) & ((1 << level_shift) - 1);
}
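/*
 * e.g. with PAE/long-mode 9-bit indices, level 4 selects gva bits 47:39
 * (PML4) and level 1 selects bits 20:12 (page table); legacy 32-bit
 * paging uses 10-bit indices instead.
 */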
static inline int pte_size(bool pae)
{
return pae ? 8 : 4;
}
static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
int level, bool pae)
{
int index;
uint64_t pte = 0;
addr_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
addr_t gpa = pt->pte[level] & page_mask;
if (level == 3 && !x86_is_long_mode(cpu)) {
gpa = pt->pte[level];
}
index = gpt_entry(pt->gva, level, pae);
address_space_rw(&address_space_memory, gpa + index * pte_size(pae),
MEMTXATTRS_UNSPECIFIED, (uint8_t *)&pte, pte_size(pae), 0);
pt->pte[level - 1] = pte;
return true;
}
/* test page table entry */
static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
int level, bool *is_large, bool pae)
{
uint64_t pte = pt->pte[level];
if (pt->write_access) {
pt->err_code |= MMU_PAGE_WT;
}
if (pt->user_access) {
pt->err_code |= MMU_PAGE_US;
}
if (pt->exec_access) {
pt->err_code |= MMU_PAGE_NX;
}
if (!pte_present(pte)) {
/* addr_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK; */
return false;
}
if (pae && !x86_is_long_mode(cpu) && 2 == level) {
goto exit;
}
if (1 == level && pte_large_page(pte)) {
pt->err_code |= MMU_PAGE_PT;
*is_large = true;
}
if (!level) {
pt->err_code |= MMU_PAGE_PT;
}
addr_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
/* check protection */
if (cr0 & CR0_WP) {
if (pt->write_access && !pte_write_access(pte)) {
return false;
}
}
if (pt->user_access && !pte_user_access(pte)) {
return false;
}
if (pae && pt->exec_access && !pte_exec_access(pte)) {
return false;
}
exit:
/* TODO: check reserved bits */
return true;
}
static inline uint64_t pse_pte_to_page(uint64_t pte)
{
return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000);
}
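/*
 * PSE-36: for 4 MiB pages, PTE bits 13-20 supply physical address bits
 * 32-39 (the << 19 above), while PTE bits 22-31 map through unchanged.
 */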
static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
{
VM_PANIC_ON(!pte_large_page(pt->pte[1]))
/* 2Mb large page */
if (pae) {
return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
}
/* 4Mb large page */
return pse_pte_to_page(pt->pte[1]) | (pt->gva & 0x3fffff);
}
static bool walk_gpt(struct CPUState *cpu, addr_t addr, int err_code,
struct gpt_translation *pt, bool pae)
{
int top_level, level;
bool is_large = false;
addr_t cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);
addr_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
memset(pt, 0, sizeof(*pt));
top_level = gpt_top_level(cpu, pae);
pt->pte[top_level] = pae ? (cr3 & PAE_CR3_MASK) : (cr3 & LEGACY_CR3_MASK);
pt->gva = addr;
pt->user_access = (err_code & MMU_PAGE_US);
pt->write_access = (err_code & MMU_PAGE_WT);
pt->exec_access = (err_code & MMU_PAGE_NX);
for (level = top_level; level > 0; level--) {
get_pt_entry(cpu, pt, level, pae);
if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
return false;
}
if (is_large) {
break;
}
}
if (!is_large) {
pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
} else {
pt->gpa = large_page_gpa(pt, pae);
}
return true;
}
bool mmu_gva_to_gpa(struct CPUState *cpu, addr_t gva, addr_t *gpa)
{
bool res;
struct gpt_translation pt;
int err_code = 0;
if (!x86_is_paging_mode(cpu)) {
*gpa = gva;
return true;
}
res = walk_gpt(cpu, gva, err_code, &pt, x86_is_pae_enabled(cpu));
if (res) {
*gpa = pt.gpa;
return true;
}
return false;
}
void vmx_write_mem(struct CPUState *cpu, addr_t gva, void *data, int bytes)
{
addr_t gpa;
while (bytes > 0) {
/* copy page */
int copy = MIN(bytes, 0x1000 - (gva & 0xfff));
if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
VM_PANIC_ON_EX(1, "%s: mmu_gva_to_gpa %llx failed\n", __func__,
gva);
} else {
address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
data, copy, 1);
}
bytes -= copy;
gva += copy;
data += copy;
}
}
void vmx_read_mem(struct CPUState *cpu, void *data, addr_t gva, int bytes)
{
addr_t gpa;
while (bytes > 0) {
/* copy page */
int copy = MIN(bytes, 0x1000 - (gva & 0xfff));
if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
VM_PANIC_ON_EX(1, "%s: mmu_gva_to_gpa %llx failed\n", __func__,
gva);
}
address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
data, copy, 0);
bytes -= copy;
gva += copy;
data += copy;
}
}

45
target/i386/hvf/x86_mmu.h Normal file
View file

@ -0,0 +1,45 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __X86_MMU_H__
#define __X86_MMU_H__
#include "x86_gen.h"
#define PT_PRESENT (1 << 0)
#define PT_WRITE (1 << 1)
#define PT_USER (1 << 2)
#define PT_WT (1 << 3)
#define PT_CD (1 << 4)
#define PT_ACCESSED (1 << 5)
#define PT_DIRTY (1 << 6)
#define PT_PS (1 << 7)
#define PT_GLOBAL (1 << 8)
#define PT_NX (1llu << 63)
/* error codes */
#define MMU_PAGE_PT (1 << 0)
#define MMU_PAGE_WT (1 << 1)
#define MMU_PAGE_US (1 << 2)
#define MMU_PAGE_NX (1 << 3)
bool mmu_gva_to_gpa(struct CPUState *cpu, addr_t gva, addr_t *gpa);
void vmx_write_mem(struct CPUState *cpu, addr_t gva, void *data, int bytes);
void vmx_read_mem(struct CPUState *cpu, void *data, addr_t gva, int bytes);
#endif /* __X86_MMU_H__ */

200
target/i386/hvf/x86_task.c Normal file
View file

@ -0,0 +1,200 @@
// This software is licensed under the terms of the GNU General Public
// License version 2, as published by the Free Software Foundation, and
// may be copied, distributed, and modified under those terms.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "sysemu/hvf.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/ioport.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#include "qemu/main-loop.h"
#include "strings.h"
#include "sysemu/accel.h"
#include "sysemu/sysemu.h"
#include "target/i386/cpu.h"
// TODO: taskswitch handling
static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
/* CR3 and ldt selector are not saved intentionally */
tss->eip = EIP(env);
tss->eflags = EFLAGS(env);
tss->eax = EAX(env);
tss->ecx = ECX(env);
tss->edx = EDX(env);
tss->ebx = EBX(env);
tss->esp = ESP(env);
tss->ebp = EBP(env);
tss->esi = ESI(env);
tss->edi = EDI(env);
tss->es = vmx_read_segment_selector(cpu, REG_SEG_ES).sel;
tss->cs = vmx_read_segment_selector(cpu, REG_SEG_CS).sel;
tss->ss = vmx_read_segment_selector(cpu, REG_SEG_SS).sel;
tss->ds = vmx_read_segment_selector(cpu, REG_SEG_DS).sel;
tss->fs = vmx_read_segment_selector(cpu, REG_SEG_FS).sel;
tss->gs = vmx_read_segment_selector(cpu, REG_SEG_GS).sel;
}
static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);
RIP(env) = tss->eip;
EFLAGS(env) = tss->eflags | 2;
/* General purpose registers */
RAX(env) = tss->eax;
RCX(env) = tss->ecx;
RDX(env) = tss->edx;
RBX(env) = tss->ebx;
RSP(env) = tss->esp;
RBP(env) = tss->ebp;
RSI(env) = tss->esi;
RDI(env) = tss->edi;
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, REG_SEG_LDTR);
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, REG_SEG_ES);
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, REG_SEG_CS);
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, REG_SEG_SS);
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, REG_SEG_DS);
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, REG_SEG_FS);
vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, REG_SEG_GS);
#if 0
load_segment(cpu, REG_SEG_LDTR, tss->ldt);
load_segment(cpu, REG_SEG_ES, tss->es);
load_segment(cpu, REG_SEG_CS, tss->cs);
load_segment(cpu, REG_SEG_SS, tss->ss);
load_segment(cpu, REG_SEG_DS, tss->ds);
load_segment(cpu, REG_SEG_FS, tss->fs);
load_segment(cpu, REG_SEG_GS, tss->gs);
#endif
}
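/*
 * 32-bit task switch: flush the outgoing task's dynamic state (eip..gs)
 * back to its TSS, read the incoming TSS, chain the old TR selector into
 * prev_tss so an IRET can return, then load the new state.
 */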
static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
{
struct x86_tss_segment32 tss_seg;
uint32_t new_tss_base = x86_segment_base(new_desc);
uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);
uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);
vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));
save_state_to_tss32(cpu, &tss_seg);
vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset);
vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));
if (old_tss_sel.sel != 0xffff) {
tss_seg.prev_tss = old_tss_sel.sel;
vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss, sizeof(tss_seg.prev_tss));
}
load_state_from_tss32(cpu, &tss_seg);
return 0;
}
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
{
uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
gate_type != VMCS_INTR_T_HWINTR &&
gate_type != VMCS_INTR_T_NMI)) {
int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
macvm_set_rip(cpu, rip + ins_len);
return;
}
load_regs(cpu);
struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
int ret;
x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, REG_SEG_TR);
uint64_t old_tss_base = vmx_read_segment_base(cpu, REG_SEG_TR);
uint32_t desc_limit;
struct x86_call_gate task_gate_desc;
struct vmx_segment vmx_seg;
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
if (reason == TSR_IDT_GATE && gate_valid) {
int dpl;
ret = x86_read_call_gate(cpu, &task_gate_desc, gate);
dpl = task_gate_desc.dpl;
x68_segment_selector cs = vmx_read_segment_selector(cpu, REG_SEG_CS);
if (tss_sel.rpl > dpl || cs.rpl > dpl) {
    /* DPRINTF("emulate_gp"); */
}
}
desc_limit = x86_segment_limit(&next_tss_desc);
if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) {
VM_PANIC("emulate_ts");
}
if (reason == TSR_IRET || reason == TSR_JMP) {
curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
}
if (reason == TSR_IRET) {
    EFLAGS(env) &= ~RFLAGS_NT;
}
if (reason != TSR_CALL && reason != TSR_IDT_GATE) {
    old_tss_sel.sel = 0xffff;
}
if (reason != TSR_IRET) {
next_tss_desc.type |= (1 << 1); /* set busy flag */
x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
}
if (next_tss_desc.type & 8) {
    ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
} else {
    /* ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); */
    VM_PANIC("task_switch_16");
}
macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
vmx_write_segment_descriptor(cpu, &vmx_seg, REG_SEG_TR);
store_regs(cpu);
hv_vcpu_invalidate_tlb(cpu->hvf_fd);
hv_vcpu_flush(cpu->hvf_fd);
}

18
target/i386/hvf/x86_task.h Normal file
View file

@ -0,0 +1,18 @@
/* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 or
* (at your option) version 3 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef HVF_TASK
#define HVF_TASK
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
int reason, bool gate_valid, uint8_t gate, uint64_t gate_type);
#endif

465
target/i386/hvf/x86hvf.c Normal file
View file

@ -0,0 +1,465 @@
/*
* Copyright (c) 2003-2008 Fabrice Bellard
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "x86hvf.h"
#include "vmx.h"
#include "vmcs.h"
#include "cpu.h"
#include "x86_descr.h"
#include "x86_decode.h"
#include "hw/i386/apic_internal.h"
#include <stdio.h>
#include <stdlib.h>
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <stdint.h>
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
SegmentCache *qseg, bool is_tr)
{
vmx_seg->sel = qseg->selector;
vmx_seg->base = qseg->base;
vmx_seg->limit = qseg->limit;
if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
/* a null selector marks the segment unusable (access-rights bit 16);
 * TR is exempt because it remains usable after processor reset despite
 * having a null selector */
vmx_seg->ar = 1 << 16;
return;
}
vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
}
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
{
qseg->limit = vmx_seg->limit;
qseg->base = vmx_seg->base;
qseg->selector = vmx_seg->sel;
qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |
(((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |
(((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |
(((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |
(((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |
(((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |
(((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |
(((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
}
void hvf_put_xsave(CPUState *cpu_state)
{
struct X86XSaveArea *xsave;
xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;
x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);
if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void *)xsave, 4096)) {
abort();
}
}
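
/* Write the descriptor tables, control registers, EFER and every segment
 * register from QEMU state into the VMCS, then flush the vcpu so the
 * writes take effect. */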
void hvf_put_segments(CPUState *cpu_state)
{
CPUX86State *env = &X86_CPU(cpu_state)->env;
struct vmx_segment seg;
wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
/* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
vmx_update_tpr(cpu_state);
wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);
macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);
hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_CS);
hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_DS);
hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_ES);
hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_SS);
hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_FS);
hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_GS);
hvf_set_segment(cpu_state, &seg, &env->tr, true);
vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_TR);
hvf_set_segment(cpu_state, &seg, &env->ldt, false);
vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_LDTR);
hv_vcpu_flush(cpu_state->hvf_fd);
}
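
/* Write the sysenter/syscall MSRs and the FS/GS base MSRs into the vcpu,
 * then resynchronize the guest TSC. */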
void hvf_put_msrs(CPUState *cpu_state)
{
CPUX86State *env = &X86_CPU(cpu_state)->env;
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
env->sysenter_cs);
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
env->sysenter_esp);
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
env->sysenter_eip);
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);
#ifdef TARGET_X86_64
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
#endif
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);
/* if (!osx_is_sierra())
wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());*/
hv_vm_sync_tsc(env->tsc);
}
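
/* Read the vcpu's FPU/SIMD state back into QEMU's XSAVE buffer and
 * restore it into the emulated CPU state. */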
void hvf_get_xsave(CPUState *cpu_state)
{
struct X86XSaveArea *xsave;
xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;
if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void *)xsave, 4096)) {
abort();
}
x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave);
}
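
/* Read the segment registers, descriptor tables and control registers out
 * of the VMCS, clearing any stale interrupt-injection marker on the way.
 * CR2 has no VMCS field, so it is simply zeroed here. */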
void hvf_get_segments(CPUState *cpu_state)
{
CPUX86State *env = &X86_CPU(cpu_state)->env;
struct vmx_segment seg;
env->interrupt_injected = -1;
vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_CS);
hvf_get_segment(&env->segs[R_CS], &seg);
vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_DS);
hvf_get_segment(&env->segs[R_DS], &seg);
vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_ES);
hvf_get_segment(&env->segs[R_ES], &seg);
vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_FS);
hvf_get_segment(&env->segs[R_FS], &seg);
vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_GS);
hvf_get_segment(&env->segs[R_GS], &seg);
vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_SS);
hvf_get_segment(&env->segs[R_SS], &seg);
vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_TR);
hvf_get_segment(&env->tr, &seg);
vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_LDTR);
hvf_get_segment(&env->ldt, &seg);
env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);
env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
env->cr[2] = 0;
env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);
env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
}
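
/* Read the MSRs back from the vcpu. Note that the MSR_IA32_APICBASE read
 * is currently discarded, and the guest TSC is reconstructed from the
 * host TSC plus the VMCS TSC offset. */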
void hvf_get_msrs(CPUState *cpu_state)
{
CPUX86State *env = &X86_CPU(cpu_state)->env;
uint64_t tmp;
hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
env->sysenter_cs = tmp;
hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
env->sysenter_esp = tmp;
hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
env->sysenter_eip = tmp;
hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);
#ifdef TARGET_X86_64
hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
#endif
hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);
env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
}
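
/* Copy the complete QEMU register state (GPRs, RFLAGS, RIP, XCR0, XSAVE
 * image, segments, MSRs and debug registers) into the vcpu. */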
int hvf_put_registers(CPUState *cpu_state)
{
X86CPU *x86cpu = X86_CPU(cpu_state);
CPUX86State *env = &x86cpu->env;
wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);
wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);
hvf_put_xsave(cpu_state);
hvf_put_segments(cpu_state);
hvf_put_msrs(cpu_state);
wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);
return 0;
}
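
/* Mirror image of hvf_put_registers: read the vcpu state back into
 * QEMU's CPUX86State. */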
int hvf_get_registers(CPUState *cpu_state)
{
X86CPU *x86cpu = X86_CPU(cpu_state);
CPUX86State *env = &x86cpu->env;
env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);
env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);
hvf_get_xsave(cpu_state);
env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);
hvf_get_segments(cpu_state);
hvf_get_msrs(cpu_state);
env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);
return 0;
}
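
/* Interrupt-window exiting forces a VM exit as soon as the guest can
 * accept an external interrupt (RFLAGS.IF set, no interrupt shadow), so
 * a pending interrupt can be injected on the next entry. */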
static void vmx_set_int_window_exiting(CPUState *cpu)
{
uint64_t val;
val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
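
/* Clear interrupt-window exiting again once there is nothing left to
 * inject. */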
void vmx_clear_int_window_exiting(CPUState *cpu)
{
uint64_t val;
val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
#define NMI_VEC 2
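
/* Queue at most one pending event into the VM-entry interruption
 * information field: a software interrupt or exception to be reinjected
 * takes priority, then a pending NMI, then an external (PIC/APIC)
 * interrupt. Returns whether an INIT or TPR-access request is still
 * pending and must be handled outside the run loop. */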
bool hvf_inject_interrupts(CPUState *cpu_state)
{
X86CPU *x86cpu = X86_CPU(cpu_state);
CPUX86State *env = &x86cpu->env;
uint8_t vector;
uint64_t intr_type;
bool have_event = true;
if (env->interrupt_injected != -1) {
vector = env->interrupt_injected;
intr_type = VMCS_INTR_T_SWINTR;
} else if (env->exception_injected != -1) {
vector = env->exception_injected;
if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
intr_type = VMCS_INTR_T_SWEXCEPTION;
} else {
intr_type = VMCS_INTR_T_HWEXCEPTION;
}
} else if (env->nmi_injected) {
vector = NMI_VEC;
intr_type = VMCS_INTR_T_NMI;
} else {
have_event = false;
}
uint64_t info = 0;
if (have_event) {
info = vector | intr_type | VMCS_INTR_VALID;
uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
vmx_clear_nmi_blocking(cpu_state);
}
if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
info &= ~(1 << 12); /* clear undefined bit */
if (intr_type == VMCS_INTR_T_SWINTR ||
intr_type == VMCS_INTR_T_SWEXCEPTION) {
wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
}
if (env->has_error_code) {
wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
env->error_code);
}
/*printf("reinject %lx err %d\n", info, err);*/
wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
};
}
if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | NMI_VEC;
wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
} else {
vmx_set_nmi_window_exiting(cpu_state);
}
}
if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
(cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
int line = cpu_get_pic_interrupt(&x86cpu->env);
cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
if (line >= 0) {
wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
}
}
if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
vmx_set_int_window_exiting(cpu_state);
}
return (cpu_state->interrupt_request
& (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
}
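
/* Handle interrupt_request flags that must be processed outside the vcpu
 * run loop: INIT, SIPI, TPR-access reports and APIC polling, also waking
 * a halted vcpu when an unmasked interrupt or NMI arrives. Returns the
 * vcpu's halted state. */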
int hvf_process_events(CPUState *cpu_state)
{
X86CPU *cpu = X86_CPU(cpu_state);
CPUX86State *env = &cpu->env;
EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
hvf_cpu_synchronize_state(cpu_state);
do_cpu_init(cpu);
}
if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
apic_poll_irq(cpu->apic_state);
}
if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK)) ||
(cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
cpu_state->halted = 0;
}
if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
hvf_cpu_synchronize_state(cpu_state);
do_cpu_sipi(cpu);
}
if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
hvf_cpu_synchronize_state(cpu_state);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
env->tpr_access_type);
}
return cpu_state->halted;
}

39
target/i386/hvf/x86hvf.h Normal file
View file

@ -0,0 +1,39 @@
/*
* Copyright (C) 2016 Veertu Inc,
* Copyright (C) 2017 Google Inc,
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef X86HVF_H
#define X86HVF_H
#include "cpu.h"
#include "x86_descr.h"
int hvf_process_events(CPUState *);
int hvf_put_registers(CPUState *);
int hvf_get_registers(CPUState *);
bool hvf_inject_interrupts(CPUState *);
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
SegmentCache *qseg, bool is_tr);
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg);
void hvf_put_xsave(CPUState *cpu_state);
void hvf_put_segments(CPUState *cpu_state);
void hvf_put_msrs(CPUState *cpu_state);
void hvf_get_xsave(CPUState *cpu_state);
void hvf_get_msrs(CPUState *cpu_state);
void vmx_clear_int_window_exiting(CPUState *cpu);
void hvf_get_segments(CPUState *cpu_state);
void vmx_update_tpr(CPUState *cpu);
void hvf_cpu_synchronize_state(CPUState *cpu_state);
#endif