hw/xen: Implement EVTCHNOP_bind_virq

Add the array of virq ports to each vCPU so that we can deliver timers,
debug ports, etc. Global virqs are allocated against vCPU 0 initially,
but can be migrated to other vCPUs (when we implement that).
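
A rough sketch of the per-vCPU state this relies on (the xen_virq and xen_singleshot_timer_ns names appear in the diff below; the exact cpu.h declaration and the XEN_NR_VIRQS value are assumptions, since that hunk is not shown here):

    /* target/i386/cpu.h -- sketch only, not the literal hunk */
    #define XEN_NR_VIRQS 24     /* assumed to mirror Xen's NR_VIRQS */

    /* inside CPUX86State */
    uint64_t xen_singleshot_timer_ns;   /* pending single-shot timer deadline */
    uint16_t xen_virq[XEN_NR_VIRQS];    /* event channel port bound to each VIRQ; 0 = unbound */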

The kernel needs to know about VIRQ_TIMER in order to accelerate timers,
so tell it via KVM_XEN_VCPU_ATTR_TYPE_TIMER. Also save/restore the value
of the singleshot timer across migration, as the kernel will handle the
hypercalls automatically now.
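
For context, the guest-side call being handled here is roughly the following (a sketch against the public Xen ABI; HYPERVISOR_event_channel_op and the struct layout come from the Xen headers, not from this commit):

    struct evtchn_bind_virq bind = {
        .virq = VIRQ_TIMER,
        .vcpu = 0,              /* target vCPU; global virqs must name vCPU 0 */
    };

    /* On success the hypervisor writes the allocated port into bind.port. */
    if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind) == 0) {
        /* timer events for this vCPU will now arrive on bind.port */
    }

The 12-byte size asserted in the handler below matches this layout: uint32_t virq, uint32_t vcpu, and a 32-bit port written back on return.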

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
Author: David Woodhouse
Date: 2022-12-13 22:40:56 +00:00
Commit: c723d4c15e
Parent: 190cc3c0ed
6 changed files with 185 additions and 0 deletions


@@ -355,6 +355,53 @@ void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type)
    }
}

static int kvm_xen_set_vcpu_timer(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    struct kvm_xen_vcpu_attr va = {
        .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
        .u.timer.port = env->xen_virq[VIRQ_TIMER],
        .u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
        .u.timer.expires_ns = env->xen_singleshot_timer_ns,
    };

    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va);
}

static void do_set_vcpu_timer_virq(CPUState *cs, run_on_cpu_data data)
{
    kvm_xen_set_vcpu_timer(cs);
}

int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port)
{
    CPUState *cs = qemu_get_cpu(vcpu_id);

    if (!cs) {
        return -ENOENT;
    }

    /* cpu.h doesn't include the actual Xen header. */
    qemu_build_assert(NR_VIRQS == XEN_NR_VIRQS);

    if (virq >= NR_VIRQS) {
        return -EINVAL;
    }

    if (port && X86_CPU(cs)->env.xen_virq[virq]) {
        return -EEXIST;
    }

    X86_CPU(cs)->env.xen_virq[virq] = port;

    if (virq == VIRQ_TIMER && kvm_xen_has_cap(EVTCHN_SEND)) {
        async_run_on_cpu(cs, do_set_vcpu_timer_virq,
                         RUN_ON_CPU_HOST_INT(port));
    }

    return 0;
}

static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
{
    X86CPU *cpu = X86_CPU(cs);
@@ -387,6 +434,8 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
    env->xen_vcpu_time_info_gpa = INVALID_GPA;
    env->xen_vcpu_runstate_gpa = INVALID_GPA;
    env->xen_vcpu_callback_vector = 0;
    env->xen_singleshot_timer_ns = 0;
    memset(env->xen_virq, 0, sizeof(env->xen_virq));

    set_vcpu_info(cs, INVALID_GPA);
    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,

@@ -395,6 +444,7 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
                          INVALID_GPA);

    if (kvm_xen_has_cap(EVTCHN_SEND)) {
        kvm_xen_set_vcpu_callback_vector(cs);
        kvm_xen_set_vcpu_timer(cs);
    }
}
@@ -829,6 +879,21 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
        err = xen_evtchn_unmask_op(&unmask);
        break;
    }
    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq virq;

        qemu_build_assert(sizeof(virq) == 12);
        if (kvm_copy_from_gva(cs, arg, &virq, sizeof(virq))) {
            err = -EFAULT;
            break;
        }

        err = xen_evtchn_bind_virq_op(&virq);
        if (!err && kvm_copy_to_gva(cs, arg, &virq, sizeof(virq))) {
            err = -EFAULT;
        }
        break;
    }
    default:
        return false;
    }
@@ -1060,6 +1125,12 @@ int kvm_put_xen_state(CPUState *cs)
        }
    }

    if (env->xen_virq[VIRQ_TIMER]) {
        ret = kvm_xen_set_vcpu_timer(cs);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
@@ -1068,6 +1139,7 @@ int kvm_get_xen_state(CPUState *cs)
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t gpa;
    int ret;

    /*
     * The kernel does not mark vcpu_info as dirty when it delivers interrupts
@@ -1090,5 +1162,24 @@ int kvm_get_xen_state(CPUState *cs)
        }
    }

    if (!kvm_xen_has_cap(EVTCHN_SEND)) {
        return 0;
    }

    /*
     * If the kernel is accelerating timers, read out the current value of the
     * singleshot timer deadline.
     */
    if (env->xen_virq[VIRQ_TIMER]) {
        struct kvm_xen_vcpu_attr va = {
            .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_GET_ATTR, &va);
        if (ret < 0) {
            return ret;
        }

        env->xen_singleshot_timer_ns = va.u.timer.expires_ns;
    }

    return 0;
}
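
The save/restore of the singleshot timer mentioned in the commit message lives in one of the changed files not shown above (presumably the X86CPU vmstate description in target/i386/machine.c). A plausible sketch of the new fields, assuming they join the existing Xen vCPU subsection:

    /* sketch only; the actual hunk is not shown in this view */
    VMSTATE_UINT16_ARRAY(env.xen_virq, X86CPU, XEN_NR_VIRQS),
    VMSTATE_UINT64(env.xen_singleshot_timer_ns, X86CPU),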