target/i386/kvm: Add feature bit definitions for KVM CPUID

Add feature bit definitions for KVM_CPUID_FEATURES
(CPUID[4000_0001].EAX and CPUID[4000_0001].EDX), so that callers no
longer have to open-code lots of bit-offset calculations.

Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
Reviewed-by: Zide Chen <zide.chen@intel.com>
Link: https://lore.kernel.org/r/20241106030728.553238-3-zhao1.liu@intel.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Zhao Liu 2024-11-06 11:07:19 +08:00 committed by Paolo Bonzini
parent cf4c263551
commit cee1f341ce
3 changed files with 39 additions and 17 deletions
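
As an illustration of what "open-coded bit-offset calculations" means here, the pattern before and after this change looks as follows (a minimal sketch condensed from the kvm.c hunks further down, not an additional change in the commit):

/* Before: open-coded bit offset into CPUID[4000_0001].EAX */
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
    kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}

/* After: the same bit, referenced through the new macro from cpu.h */
if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) {
    kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}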

hw/i386/kvm/clock.c

@@ -27,7 +27,6 @@
#include "qapi/error.h"
#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"
#include "qom/object.h"
#define TYPE_KVM_CLOCK "kvmclock"
@@ -333,8 +332,8 @@ void kvmclock_create(bool create_always)
assert(kvm_enabled());
if (create_always ||
-cpu->env.features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) |
-(1ULL << KVM_FEATURE_CLOCKSOURCE2))) {
+cpu->env.features[FEAT_KVM] & (CPUID_KVM_CLOCK |
+CPUID_KVM_CLOCK2)) {
sysbus_create_simple(TYPE_KVM_CLOCK, -1, NULL);
}
}

target/i386/cpu.h

@@ -29,6 +29,7 @@
#include "qapi/qapi-types-common.h"
#include "qemu/cpu-float.h"
#include "qemu/timer.h"
#include "standard-headers/asm-x86/kvm_para.h"
#define XEN_NR_VIRQS 24
@@ -1010,6 +1011,28 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_8000_0007_EBX_OVERFLOW_RECOV (1U << 0)
#define CPUID_8000_0007_EBX_SUCCOR (1U << 1)
+/* (Old) KVM paravirtualized clocksource */
+#define CPUID_KVM_CLOCK (1U << KVM_FEATURE_CLOCKSOURCE)
+/* (New) KVM specific paravirtualized clocksource */
+#define CPUID_KVM_CLOCK2 (1U << KVM_FEATURE_CLOCKSOURCE2)
+/* KVM asynchronous page fault */
+#define CPUID_KVM_ASYNCPF (1U << KVM_FEATURE_ASYNC_PF)
+/* KVM stolen (when guest vCPU is not running) time accounting */
+#define CPUID_KVM_STEAL_TIME (1U << KVM_FEATURE_STEAL_TIME)
+/* KVM paravirtualized end-of-interrupt signaling */
+#define CPUID_KVM_PV_EOI (1U << KVM_FEATURE_PV_EOI)
+/* KVM paravirtualized spinlocks support */
+#define CPUID_KVM_PV_UNHALT (1U << KVM_FEATURE_PV_UNHALT)
+/* KVM host-side polling on HLT control from the guest */
+#define CPUID_KVM_POLL_CONTROL (1U << KVM_FEATURE_POLL_CONTROL)
+/* KVM interrupt-based asynchronous page fault */
+#define CPUID_KVM_ASYNCPF_INT (1U << KVM_FEATURE_ASYNC_PF_INT)
+/* KVM 'Extended Destination ID' support for external interrupts */
+#define CPUID_KVM_MSI_EXT_DEST_ID (1U << KVM_FEATURE_MSI_EXT_DEST_ID)
+/* Hint to KVM that vCPUs expect to never be preempted for an unlimited time */
+#define CPUID_KVM_HINTS_REALTIME (1U << KVM_HINTS_REALTIME)
/* CLZERO instruction */
#define CPUID_8000_0008_EBX_CLZERO (1U << 0)
/* Always save/restore FP error pointers */

target/i386/kvm/kvm.c

@@ -564,13 +564,13 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
* be enabled without the in-kernel irqchip
*/
if (!kvm_irqchip_in_kernel()) {
-ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
+ret &= ~CPUID_KVM_PV_UNHALT;
}
if (kvm_irqchip_is_split()) {
-ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
+ret |= CPUID_KVM_MSI_EXT_DEST_ID;
}
} else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
-ret |= 1U << KVM_HINTS_REALTIME;
+ret |= CPUID_KVM_HINTS_REALTIME;
}
if (current_machine->cgs) {
@@ -3978,20 +3978,20 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
}
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
}
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
}
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) {
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
}
@@ -4456,19 +4456,19 @@ static int kvm_get_msrs(X86CPU *cpu)
#endif
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
}
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
}
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
}
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
}
-if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
+if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) {
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
}
if (has_architectural_pmu_version > 0) {
@@ -6195,7 +6195,7 @@ uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
return address;
}
env = &X86_CPU(first_cpu)->env;
-if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
+if (!(env->features[FEAT_KVM] & CPUID_KVM_MSI_EXT_DEST_ID)) {
return address;
}