KVM: Coalesced MMIO support
MMIO exits are more expensive in KVM or Xen than in QEMU because they
involve, at least, privilege transitions.  However, MMIO write operations
can be effectively batched if those writes do not have side effects.
Good examples of this include VGA pixel operations when in a planar mode.

As it turns out, we can get a nice boost in other areas too.  Laurent
mentioned a 9.7% performance boost in iperf with the coalesced MMIO
changes for the e1000 when he originally posted this work for KVM.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5961 c046a42c-6fe2-441c-8c8c-71466251a162
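A minimal sketch of how a device model might use the new helper.  The function
name and the address range are hypothetical; only kvm_enabled() and the
kvm_coalesce_mmio_region() added by this patch come from the tree.

/* Hypothetical sketch: mark a side-effect-free MMIO window (e.g. a VGA
 * planar-mode aperture) as coalesced so guest writes are batched instead
 * of causing one exit each.  Names and addresses are illustrative only. */
static void vga_enable_coalesced_mmio(target_phys_addr_t base, ram_addr_t size)
{
    if (!kvm_enabled())
        return;

    /* On success, writes to [base, base + size) are queued by the kernel
     * and replayed by kvm_run_coalesced_mmio() after the next exit. */
    if (kvm_coalesce_mmio_region(base, size) < 0) {
        /* Older kernel without KVM_CAP_COALESCED_MMIO: every write still
         * produces an ordinary MMIO exit, which is correct, just slower. */
    }
}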
commit f65ed4c152
parent d85dc283fa

8 changed files with 112 additions and 0 deletions
 kvm-all.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

--- a/kvm-all.c
+++ b/kvm-all.c
@@ -24,6 +24,9 @@
 #include "sysemu.h"
 #include "kvm.h"
 
+/* KVM uses PAGE_SIZE in it's definition of COALESCED_MMIO_MAX */
+#define PAGE_SIZE TARGET_PAGE_SIZE
+
 //#define DEBUG_KVM
 
 #ifdef DEBUG_KVM
@@ -52,6 +55,7 @@ struct KVMState
     KVMSlot slots[32];
     int fd;
     int vmfd;
+    int coalesced_mmio;
 };
 
 static KVMState *kvm_state;
@@ -228,6 +232,44 @@ out:
     qemu_free(d.dirty_bitmap);
 }
 
+int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+{
+    int ret = -ENOSYS;
+#ifdef KVM_CAP_COALESCED_MMIO
+    KVMState *s = kvm_state;
+
+    if (s->coalesced_mmio) {
+        struct kvm_coalesced_mmio_zone zone;
+
+        zone.addr = start;
+        zone.size = size;
+
+        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
+    }
+#endif
+
+    return ret;
+}
+
+int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+{
+    int ret = -ENOSYS;
+#ifdef KVM_CAP_COALESCED_MMIO
+    KVMState *s = kvm_state;
+
+    if (s->coalesced_mmio) {
+        struct kvm_coalesced_mmio_zone zone;
+
+        zone.addr = start;
+        zone.size = size;
+
+        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+    }
+#endif
+
+    return ret;
+}
+
 int kvm_init(int smp_cpus)
 {
     KVMState *s;
@@ -298,6 +340,13 @@ int kvm_init(int smp_cpus)
         goto err;
     }
 
+    s->coalesced_mmio = 0;
+#ifdef KVM_CAP_COALESCED_MMIO
+    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
+    if (ret > 0)
+        s->coalesced_mmio = ret;
+#endif
+
     ret = kvm_arch_init(s, smp_cpus);
     if (ret < 0)
         goto err;
@@ -357,6 +406,27 @@ static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
     return 1;
 }
 
+static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
+{
+#ifdef KVM_CAP_COALESCED_MMIO
+    KVMState *s = kvm_state;
+    if (s->coalesced_mmio) {
+        struct kvm_coalesced_mmio_ring *ring;
+
+        ring = (void *)run + (s->coalesced_mmio * TARGET_PAGE_SIZE);
+        while (ring->first != ring->last) {
+            struct kvm_coalesced_mmio *ent;
+
+            ent = &ring->coalesced_mmio[ring->first];
+
+            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
+            /* FIXME smp_wmb() */
+            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
+        }
+    }
+#endif
+}
+
 int kvm_cpu_exec(CPUState *env)
 {
     struct kvm_run *run = env->kvm_run;
@@ -387,6 +457,8 @@ int kvm_cpu_exec(CPUState *env)
         abort();
     }
 
+    kvm_run_coalesced_mmio(env, run);
+
     ret = 0; /* exit loop */
     switch (run->exit_reason) {
     case KVM_EXIT_IO:
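For context on the drain loop above: the value returned by
KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO) is the page offset of the
coalesced-MMIO ring inside the per-vcpu kvm_run mapping, which is why
kvm_init() stores it and kvm_run_coalesced_mmio() multiplies it by the page
size.  The rough shape of the kernel ABI involved is sketched below; the real
declarations live in <linux/kvm.h> and may differ slightly by kernel version,
so treat this as illustrative rather than authoritative.

/* Sketch of the kernel ABI behind the drain loop; not part of this commit. */
struct kvm_coalesced_mmio_zone {   /* argument to KVM_(UN)REGISTER_COALESCED_MMIO */
    __u64 addr;
    __u32 size;
    __u32 pad;
};

struct kvm_coalesced_mmio {        /* one deferred guest write */
    __u64 phys_addr;
    __u32 len;
    __u32 pad;
    __u8  data[8];
};

struct kvm_coalesced_mmio_ring {   /* fills one page of the vcpu mmap area */
    __u32 first, last;             /* consumer / producer indices */
    struct kvm_coalesced_mmio coalesced_mmio[0];
};

/* KVM_COALESCED_MMIO_MAX is derived from the kernel's PAGE_SIZE, roughly
 * (PAGE_SIZE - sizeof(ring header)) / sizeof(struct kvm_coalesced_mmio),
 * which is why the diff defines PAGE_SIZE as TARGET_PAGE_SIZE before
 * including the header. */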