accel/hvf: Use accel-specific per-vcpu @dirty field
HVF has a specific use of the CPUState::vcpu_dirty field (CPUState::vcpu_dirty is not used by common code). To make this field accel-specific, add and use a new @dirty variable in the AccelCPUState structure.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20240424174506.326-4-philmd@linaro.org>
commit e620363687
parent 79f1926b2d
6 changed files with 12 additions and 12 deletions
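The change follows HVF's lazy register-synchronization pattern: guest register state is pulled from the hypervisor only when QEMU needs it, the per-vCPU dirty flag then marks QEMU's copy as the reference, and the state is pushed back before the vCPU next runs. A minimal, self-contained C sketch of that pattern (illustrative structures and names only, not the actual QEMU/HVF code):

/*
 * Sketch of the per-vCPU dirty-flag pattern touched by the diff below:
 * the accelerator keeps its own "dirty" flag next to its other per-vCPU
 * state instead of borrowing a field from the generic CPU structure.
 * All names here are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct AccelVCPUState {
    int fd;       /* hypervisor vCPU handle (placeholder) */
    bool dirty;   /* QEMU-side register copy is the reference */
} AccelVCPUState;

typedef struct VCPU {
    unsigned long regs[4];   /* cached register file (placeholder) */
    AccelVCPUState *accel;   /* accelerator-specific per-vCPU data */
} VCPU;

/* Stand-ins for the accelerator's get/put register helpers. */
static void accel_get_registers(VCPU *cpu)
{
    printf("pull registers from hypervisor for vCPU fd %d\n", cpu->accel->fd);
}

static void accel_put_registers(VCPU *cpu)
{
    printf("push registers to hypervisor for vCPU fd %d\n", cpu->accel->fd);
}

/* Lazy read: only fetch from the hypervisor if the cache is not already
 * authoritative; afterwards QEMU's copy is the reference, so mark it dirty
 * so it gets written back before the next guest entry. */
static void cpu_synchronize_state(VCPU *cpu)
{
    if (!cpu->accel->dirty) {
        accel_get_registers(cpu);
        cpu->accel->dirty = true;
    }
}

/* Flush before entering the guest: write back only if something changed. */
static void flush_cpu_state(VCPU *cpu)
{
    if (cpu->accel->dirty) {
        accel_put_registers(cpu);
        cpu->accel->dirty = false;
    }
}

int main(void)
{
    AccelVCPUState accel = { .fd = 3, .dirty = true };  /* dirty after create */
    VCPU cpu = { .regs = {0}, .accel = &accel };

    flush_cpu_state(&cpu);        /* initial QEMU state is pushed once */
    cpu_synchronize_state(&cpu);  /* later reads pull state on demand */
    flush_cpu_state(&cpu);
    return 0;
}

Keeping the flag inside the accelerator's own per-vCPU structure, as the diff does for AccelCPUState, leaves the generic CPUState free of accelerator-specific bookkeeping; the remaining CPUState::vcpu_dirty field stays for KVM's use.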
@@ -204,15 +204,15 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
 
 static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 {
-    if (!cpu->vcpu_dirty) {
+    if (!cpu->accel->dirty) {
         hvf_get_registers(cpu);
-        cpu->vcpu_dirty = true;
+        cpu->accel->dirty = true;
     }
 }
 
 static void hvf_cpu_synchronize_state(CPUState *cpu)
 {
-    if (!cpu->vcpu_dirty) {
+    if (!cpu->accel->dirty) {
         run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
@@ -221,7 +221,7 @@ static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
                                              run_on_cpu_data arg)
 {
     /* QEMU state is the reference, push it to HVF now and on next entry */
-    cpu->vcpu_dirty = true;
+    cpu->accel->dirty = true;
 }
 
 static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
@@ -402,7 +402,7 @@ static int hvf_init_vcpu(CPUState *cpu)
 #else
     r = hv_vcpu_create((hv_vcpuid_t *)&cpu->accel->fd, HV_VCPU_DEFAULT);
 #endif
-    cpu->vcpu_dirty = 1;
+    cpu->accel->dirty = true;
     assert_hvf_ok(r);
 
     cpu->accel->guest_debug_enabled = false;
@@ -525,6 +525,7 @@ struct CPUState {
     uint32_t kvm_fetch_index;
     uint64_t dirty_pages;
     int kvm_vcpu_stats_fd;
+    bool vcpu_dirty;
 
     /* Use by accel-block: CPU is executing an ioctl() */
     QemuLockCnt in_ioctl_lock;
@@ -546,8 +547,6 @@ struct CPUState {
     int32_t exception_index;
 
     AccelCPUState *accel;
-    /* shared by kvm and hvf */
-    bool vcpu_dirty;
 
     /* Used to keep track of an outstanding cpu throttle thread for migration
      * autoconverge
@@ -55,6 +55,7 @@ struct AccelCPUState {
     bool vtimer_masked;
    sigset_t unblock_ipi_mask;
     bool guest_debug_enabled;
+    bool dirty;
 };
 
 void assert_hvf_ok(hv_return_t ret);
@@ -806,9 +806,9 @@ int hvf_put_registers(CPUState *cpu)
 
 static void flush_cpu_state(CPUState *cpu)
 {
-    if (cpu->vcpu_dirty) {
+    if (cpu->accel->dirty) {
         hvf_put_registers(cpu);
-        cpu->vcpu_dirty = false;
+        cpu->accel->dirty = false;
     }
 }
 
@@ -419,9 +419,9 @@ int hvf_vcpu_exec(CPUState *cpu)
     }
 
     do {
-        if (cpu->vcpu_dirty) {
+        if (cpu->accel->dirty) {
             hvf_put_registers(cpu);
-            cpu->vcpu_dirty = false;
+            cpu->accel->dirty = false;
         }
 
         if (hvf_inject_interrupts(cpu)) {
@@ -427,7 +427,7 @@ int hvf_process_events(CPUState *cs)
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
 
-    if (!cs->vcpu_dirty) {
+    if (!cs->accel->dirty) {
         /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */
         env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
     }