Mirror of https://github.com/Motorhead1991/qemu.git
Merge remote-tracking branch 'sstabellini/for_1.1_rc3' into staging
* sstabellini/for_1.1_rc3:
  Call xc_domain_shutdown with the reboot flag when the guest requests a reboot.
  xen: Fix PV-on-HVM
  xen_disk: properly update stats in ioreq_release()
  xen_disk: use bdrv_aio_flush instead of bdrv_flush
  xen_disk: remove syncwrite option
  xen: disable rtc_clock
  xen: do not initialize the interval timer and PCSPK emulator
commit dd86df756e

4 changed files with 56 additions and 33 deletions

hw/pc.c | 23
hw/pc.c

@@ -47,6 +47,7 @@
 #include "ui/qemu-spice.h"
 #include "memory.h"
 #include "exec-memory.h"
+#include "arch_init.h"
 
 /* output Bochs bios info messages */
 //#define DEBUG_BIOS
@@ -1097,7 +1098,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
     qemu_irq pit_alt_irq = NULL;
     qemu_irq rtc_irq = NULL;
     qemu_irq *a20_line;
-    ISADevice *i8042, *port92, *vmmouse, *pit;
+    ISADevice *i8042, *port92, *vmmouse, *pit = NULL;
     qemu_irq *cpu_exit_irq;
 
     register_ioport_write(0x80, 1, 1, ioport80_write, NULL);
@@ -1126,16 +1127,18 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
 
     qemu_register_boot_set(pc_boot_set, *rtc_state);
 
-    if (kvm_irqchip_in_kernel()) {
-        pit = kvm_pit_init(isa_bus, 0x40);
-    } else {
-        pit = pit_init(isa_bus, 0x40, pit_isa_irq, pit_alt_irq);
-    }
-    if (hpet) {
-        /* connect PIT to output control line of the HPET */
-        qdev_connect_gpio_out(hpet, 0, qdev_get_gpio_in(&pit->qdev, 0));
+    if (!xen_enabled()) {
+        if (kvm_irqchip_in_kernel()) {
+            pit = kvm_pit_init(isa_bus, 0x40);
+        } else {
+            pit = pit_init(isa_bus, 0x40, pit_isa_irq, pit_alt_irq);
+        }
+        if (hpet) {
+            /* connect PIT to output control line of the HPET */
+            qdev_connect_gpio_out(hpet, 0, qdev_get_gpio_in(&pit->qdev, 0));
+        }
+        pcspk_init(isa_bus, pit);
     }
-    pcspk_init(isa_bus, pit);
 
     for(i = 0; i < MAX_SERIAL_PORTS; i++) {
         if (serial_hds[i]) {
hw/xen_common.h

@@ -148,6 +148,6 @@ static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom,
 }
 #endif
 
-void destroy_hvm_domain(void);
+void destroy_hvm_domain(bool reboot);
 
 #endif /* QEMU_HW_XEN_COMMON_H */
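The xen-all.c side of this prototype change is not included in the excerpt above. Going only by the new signature and the commit message ("Call xc_domain_shutdown with the reboot flag when the guest requests a reboot"), the reboot flag would be forwarded to libxenctrl roughly as in the sketch below; it uses the plain libxenctrl API rather than QEMU's XenXC compatibility wrappers, and error handling is simplified.

/* Hedged sketch only, not the actual xen-all.c implementation. */
#include <stdbool.h>
#include <stdio.h>
#include <xenctrl.h>        /* xc_interface_open(), xc_domain_shutdown() */
#include <xen/sched.h>      /* SHUTDOWN_poweroff, SHUTDOWN_reboot */

static int xen_domid = 1;   /* stand-in for QEMU's global guest domain id */

void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc = xc_interface_open(NULL, NULL, 0);

    if (xc == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
        return;
    }
    /* Ask the hypervisor to reboot or power off the domain with the
     * requested reason instead of tearing it down from inside QEMU. */
    if (xc_domain_shutdown(xc, xen_domid,
                           reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff) != 0) {
        fprintf(stderr, "xc_domain_shutdown failed\n");
    }
    xc_interface_close(xc);
}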
hw/xen_disk.c

@@ -48,7 +48,6 @@
 
 /* ------------------------------------------------------------- */
 
-static int syncwrite = 0;
 static int batch_maps = 0;
 
 static int max_requests = 32;
@@ -67,6 +66,7 @@ struct ioreq {
     QEMUIOVector v;
     int presync;
     int postsync;
+    uint8_t mapped;
 
     /* grant mapping */
     uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -154,7 +154,7 @@ static void ioreq_finish(struct ioreq *ioreq)
     blkdev->requests_finished++;
 }
 
-static void ioreq_release(struct ioreq *ioreq)
+static void ioreq_release(struct ioreq *ioreq, bool finish)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
 
@@ -162,7 +162,11 @@ static void ioreq_release(struct ioreq *ioreq)
     memset(ioreq, 0, sizeof(*ioreq));
     ioreq->blkdev = blkdev;
     QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
-    blkdev->requests_finished--;
+    if (finish) {
+        blkdev->requests_finished--;
+    } else {
+        blkdev->requests_inflight--;
+    }
 }
 
 /*
@@ -189,15 +193,10 @@ static int ioreq_parse(struct ioreq *ioreq)
             ioreq->presync = 1;
             return 0;
         }
-        if (!syncwrite) {
-            ioreq->presync = ioreq->postsync = 1;
-        }
+        ioreq->presync = ioreq->postsync = 1;
         /* fall through */
     case BLKIF_OP_WRITE:
         ioreq->prot = PROT_READ; /* from memory */
-        if (syncwrite) {
-            ioreq->postsync = 1;
-        }
         break;
     default:
         xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
@@ -248,7 +247,7 @@ static void ioreq_unmap(struct ioreq *ioreq)
     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
     int i;
 
-    if (ioreq->v.niov == 0) {
+    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
         return;
     }
     if (batch_maps) {
@@ -274,6 +273,7 @@ static void ioreq_unmap(struct ioreq *ioreq)
             ioreq->page[i] = NULL;
         }
     }
+    ioreq->mapped = 0;
 }
 
 static int ioreq_map(struct ioreq *ioreq)
@@ -281,7 +281,7 @@ static int ioreq_map(struct ioreq *ioreq)
     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
     int i;
 
-    if (ioreq->v.niov == 0) {
+    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
         return 0;
     }
     if (batch_maps) {
@@ -313,9 +313,12 @@ static int ioreq_map(struct ioreq *ioreq)
             ioreq->blkdev->cnt_map++;
         }
     }
+    ioreq->mapped = 1;
     return 0;
 }
 
+static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
+
 static void qemu_aio_complete(void *opaque, int ret)
 {
     struct ioreq *ioreq = opaque;
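Taken together, the mapped flag added in the hunks above simply makes the grant map/unmap pair idempotent, which matters once the asynchronous flush path below can re-enter the same request. The guard pattern in isolation, with hypothetical names rather than the QEMU functions and the grant-table calls replaced by printf():

/* Standalone sketch of the idempotent map/unmap guard. */
#include <stdint.h>
#include <stdio.h>

struct req {
    uint8_t mapped;                /* 0 = not mapped, 1 = mapped */
};

static void req_map(struct req *r)
{
    if (r->mapped == 1) {
        return;                    /* already mapped: second call is a no-op */
    }
    printf("mapping grant refs\n");
    r->mapped = 1;
}

static void req_unmap(struct req *r)
{
    if (r->mapped == 0) {
        return;                    /* never mapped or already unmapped: no-op */
    }
    printf("unmapping grant refs\n");
    r->mapped = 0;
}

int main(void)
{
    struct req r = { 0 };
    req_map(&r);
    req_map(&r);                   /* ignored */
    req_unmap(&r);
    req_unmap(&r);                 /* ignored */
    return 0;
}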
@@ -327,11 +330,19 @@ static void qemu_aio_complete(void *opaque, int ret)
     }
 
     ioreq->aio_inflight--;
+    if (ioreq->presync) {
+        ioreq->presync = 0;
+        ioreq_runio_qemu_aio(ioreq);
+        return;
+    }
     if (ioreq->aio_inflight > 0) {
         return;
     }
     if (ioreq->postsync) {
-        bdrv_flush(ioreq->blkdev->bs);
+        ioreq->postsync = 0;
+        ioreq->aio_inflight++;
+        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
+        return;
     }
 
     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
@@ -351,7 +362,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 
     ioreq->aio_inflight++;
     if (ioreq->presync) {
-        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
+        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
+        return 0;
     }
 
     switch (ioreq->req.operation) {
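The two hunks above replace the blocking bdrv_flush() calls with bdrv_aio_flush(), reusing qemu_aio_complete as the completion callback so that a request moves through presync flush, data I/O and postsync flush as a chain of asynchronous steps. A toy model of that control flow, outside QEMU and with hypothetical names (aio_flush() here just invokes the callback immediately so the example runs):

/* Toy model of the asynchronous flush chaining; not the QEMU API. */
#include <stdio.h>

struct request {
    int presync;       /* flush before the I/O */
    int postsync;      /* flush after the I/O */
    int aio_inflight;  /* outstanding asynchronous operations */
};

typedef void completion_fn(void *opaque, int ret);

static void aio_flush(completion_fn *cb, void *opaque)
{
    printf("flush issued\n");
    cb(opaque, 0);                     /* pretend the flush finished */
}

static void run_io(struct request *req);

static void io_complete(void *opaque, int ret)
{
    struct request *req = opaque;

    req->aio_inflight--;
    if (req->presync) {                /* presync flush done: start the I/O */
        req->presync = 0;
        run_io(req);
        return;
    }
    if (req->aio_inflight > 0) {
        return;                        /* wait for remaining async pieces */
    }
    if (req->postsync) {               /* I/O done: chain the postsync flush */
        req->postsync = 0;
        req->aio_inflight++;
        aio_flush(io_complete, req);
        return;
    }
    printf("request complete (ret=%d)\n", ret);
}

static void run_io(struct request *req)
{
    req->aio_inflight++;
    if (req->presync) {                /* flush first, do the I/O from the callback */
        aio_flush(io_complete, req);
        return;
    }
    printf("submitting I/O\n");
    io_complete(req, 0);               /* pretend the I/O finished */
}

int main(void)
{
    struct request req = { .presync = 1, .postsync = 1, .aio_inflight = 0 };
    run_io(&req);
    return 0;
}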
@@ -449,7 +461,7 @@ static void blk_send_response_all(struct XenBlkDev *blkdev)
     while (!QLIST_EMPTY(&blkdev->finished)) {
         ioreq = QLIST_FIRST(&blkdev->finished);
         send_notify += blk_send_response_one(ioreq);
-        ioreq_release(ioreq);
+        ioreq_release(ioreq, true);
     }
     if (send_notify) {
         xen_be_send_notify(&blkdev->xendev);
@@ -505,7 +517,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
             if (blk_send_response_one(ioreq)) {
                 xen_be_send_notify(&blkdev->xendev);
             }
-            ioreq_release(ioreq);
+            ioreq_release(ioreq, false);
             continue;
         }