target-i386/kvm: Hyper-V HV_X64_MSR_VP_RUNTIME support

The HV_X64_MSR_VP_RUNTIME MSR is used by the guest to read
"the time the virtual processor consumes running guest code,
and the time the associated logical processor spends running
hypervisor code on behalf of that guest."
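
For reference, a minimal guest-side sketch of how an enlightened guest
could sample this counter (an illustration, not part of this patch; the
0x40000010 MSR index and the 100ns granularity follow the Hyper-V spec,
while the hv_rdmsr()/vp_runtime_ns() helpers are hypothetical and would
need to run at CPL0 in the guest):

#include <stdint.h>

#define HV_X64_MSR_VP_RUNTIME 0x40000010

static inline uint64_t hv_rdmsr(uint32_t msr)
{
    uint32_t lo, hi;

    /* RDMSR reads the MSR selected by ECX into EDX:EAX. */
    __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
    return ((uint64_t)hi << 32) | lo;
}

static uint64_t vp_runtime_ns(void)
{
    /* The counter is assumed to be reported in 100ns units. */
    return hv_rdmsr(HV_X64_MSR_VP_RUNTIME) * 100;
}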

On the KVM side, that time is calculated for the vcpu task by
task_cputime_adjusted().
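
Roughly, the host-side accounting can be pictured as the sketch below
(a hedged illustration only, not the actual kernel patch; the
vcpu_runtime_100ns() helper name and the nanosecond cputime
representation are assumptions):

#include <linux/sched.h>   /* task_cputime_adjusted(), current */
#include <linux/math64.h>  /* div_u64() */

/*
 * Guest plus hypervisor time charged to the vcpu task, scaled to the
 * 100ns units that HV_X64_MSR_VP_RUNTIME reports.
 */
static u64 vcpu_runtime_100ns(void)
{
    u64 utime, stime;   /* assumed to be in nanoseconds */

    task_cputime_adjusted(current, &utime, &stime);

    return div_u64(utime + stime, 100);
}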

Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Richard Henderson <rth@twiddle.net>
CC: Eduardo Habkost <ehabkost@redhat.com>
CC: "Andreas Färber" <afaerber@suse.de>
CC: Marcelo Tosatti <mtosatti@redhat.com>
Message-Id: <1442397584-16698-4-git-send-email-den@openvz.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Authored by Andrey Smetanin on 2015-09-16 12:59:44 +03:00, committed by Paolo Bonzini
parent 8c145d7ca9
commit 46eb8f98f2
5 changed files with 43 additions and 1 deletion

target-i386/kvm.c

@@ -84,6 +84,7 @@ static bool has_msr_hv_tsc;
 static bool has_msr_hv_crash;
 static bool has_msr_hv_reset;
 static bool has_msr_hv_vpindex;
+static bool has_msr_hv_runtime;
 static bool has_msr_mtrr;
 static bool has_msr_xss;
@@ -464,7 +465,8 @@ static bool hyperv_enabled(X86CPU *cpu)
             cpu->hyperv_relaxed_timing ||
             cpu->hyperv_crash ||
             cpu->hyperv_reset ||
-            cpu->hyperv_vpindex);
+            cpu->hyperv_vpindex ||
+            cpu->hyperv_runtime);
 }
 
 static Error *invtsc_mig_blocker;
@@ -539,6 +541,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
         if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
             c->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
         }
+        if (cpu->hyperv_runtime && has_msr_hv_runtime) {
+            c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
+        }
         c = &cpuid_data.entries[cpuid_i++];
         c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
         if (cpu->hyperv_relaxed_timing) {
@@ -875,6 +880,10 @@ static int kvm_get_supported_msrs(KVMState *s)
                     has_msr_hv_vpindex = true;
                     continue;
                 }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
+                    has_msr_hv_runtime = true;
+                    continue;
+                }
             }
         }
@@ -1420,6 +1429,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_CTL,
                               HV_X64_MSR_CRASH_CTL_NOTIFY);
         }
+        if (has_msr_hv_runtime) {
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
+                              env->msr_hv_runtime);
+        }
         if (has_msr_mtrr) {
             kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
             kvm_msr_entry_set(&msrs[n++],
@@ -1785,6 +1798,9 @@ static int kvm_get_msrs(X86CPU *cpu)
             msrs[n++].index = HV_X64_MSR_CRASH_P0 + j;
         }
     }
+    if (has_msr_hv_runtime) {
+        msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
+    }
     if (has_msr_mtrr) {
         msrs[n++].index = MSR_MTRRdefType;
         msrs[n++].index = MSR_MTRRfix64K_00000;
@@ -1938,6 +1954,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
             env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
             break;
+        case HV_X64_MSR_VP_RUNTIME:
+            env->msr_hv_runtime = msrs[i].data;
+            break;
         case MSR_MTRRdefType:
             env->mtrr_deftype = msrs[i].data;
             break;