aio / timers: Switch entire codebase to the new timer API

This is an autogenerated patch created with scripts/switch-timer-api.

Switch the entire code base over to the new timer API.

Note that this patch may introduce some line-length issues.

Signed-off-by: Alex Bligh <alex@alex.org.uk>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Author:    Alex Bligh <alex@alex.org.uk>
Date:      2013-08-21 16:03:08 +01:00
Committer: Stefan Hajnoczi <stefanha@redhat.com>
Commit:    bc72ad6754 (parent: fe10ab540b)

121 changed files with 678 additions and 678 deletions
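
For orientation before the hunks: the script performs a mechanical rename in which each clock helper takes the clock as an explicit argument (QEMU_CLOCK_VIRTUAL replaces vm_clock) and the timer operations drop their qemu_ prefix. A minimal before/after sketch distilled from the hunks below; the callback example_cb, the function example, and the one-second deadline are illustrative, not taken from the patch:

    #include "qemu/timer.h"

    static void example_cb(void *opaque) { /* fires when the deadline passes */ }

    static void example(void *opaque)
    {
        /* old: t = qemu_new_timer_ns(vm_clock, example_cb, opaque); */
        QEMUTimer *t = timer_new_ns(QEMU_CLOCK_VIRTUAL, example_cb, opaque);

        /* old: qemu_mod_timer(t, qemu_get_clock_ns(vm_clock) + ...);
         * arm the timer one second of guest (virtual) time from now */
        timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000000LL);

        /* old: qemu_del_timer(t); qemu_free_timer(t); */
        timer_del(t);   /* cancel if still pending */
        timer_free(t);  /* release the timer */
    }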


@@ -170,7 +170,7 @@ static uint64_t arm_sysctl_read(void *opaque, hwaddr offset,
     case 0x58: /* BOOTCS */
         return 0;
     case 0x5c: /* 24MHz */
-        return muldiv64(qemu_get_clock_ns(vm_clock), 24000000, get_ticks_per_sec());
+        return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24000000, get_ticks_per_sec());
     case 0x60: /* MISC */
         return 0;
     case 0x84: /* PROCID0 */
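
A note on the conversion in the 24MHz case above: qemu_clock_get_ns() returns guest-virtual time in nanoseconds and get_ticks_per_sec() is 10^9, so muldiv64(ns, 24000000, 10^9) rescales the clock to 24 MHz ticks. muldiv64() exists because the naive ns * 24000000 / 1000000000 overflows 64 bits within minutes of guest uptime. A rough stand-in for what it computes, assuming a compiler with __int128 (QEMU's real muldiv64() is portable and lives in its utility headers):

    /* Illustrative only: a * b / c with a 128-bit intermediate product. */
    static inline uint64_t muldiv64_sketch(uint64_t a, uint32_t b, uint32_t c)
    {
        return (uint64_t)(((unsigned __int128)a * b) / c);
    }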


@@ -128,7 +128,7 @@ static unsigned int get_counter(CUDATimer *s)
     int64_t d;
     unsigned int counter;
 
-    d = muldiv64(qemu_get_clock_ns(vm_clock) - s->load_time,
+    d = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->load_time,
                  CUDA_TIMER_FREQ, get_ticks_per_sec());
     if (s->index == 0) {
         /* the timer goes down from latch to -1 (period of latch + 2) */
@@ -147,7 +147,7 @@ static unsigned int get_counter(CUDATimer *s)
 static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
 {
     CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
-    ti->load_time = qemu_get_clock_ns(vm_clock);
+    ti->load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
     ti->counter_value = val;
     cuda_timer_update(s, ti, ti->load_time);
 }
@@ -191,10 +191,10 @@ static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
     if (!ti->timer)
         return;
     if ((s->acr & T1MODE) != T1MODE_CONT) {
-        qemu_del_timer(ti->timer);
+        timer_del(ti->timer);
     } else {
         ti->next_irq_time = get_next_irq_time(ti, current_time);
-        qemu_mod_timer(ti->timer, ti->next_irq_time);
+        timer_mod(ti->timer, ti->next_irq_time);
     }
 }
@@ -304,7 +304,7 @@ static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
         break;
     case 4:
         s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
-        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
         break;
     case 5:
         s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
@@ -313,12 +313,12 @@ static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
         break;
     case 6:
         s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
-        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
         break;
     case 7:
         s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
         s->ifr &= ~T1_INT;
-        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
         break;
     case 8:
         s->timers[1].latch = val;
@@ -332,7 +332,7 @@ static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
         break;
     case 11:
         s->acr = val;
-        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
         cuda_update(s);
         break;
     case 12:
@@ -463,8 +463,8 @@ static void cuda_adb_poll(void *opaque)
         obuf[1] = 0x40; /* polled data */
         cuda_send_packet_to_host(s, obuf, olen + 2);
     }
-    qemu_mod_timer(s->adb_poll_timer,
-                   qemu_get_clock_ns(vm_clock) +
+    timer_mod(s->adb_poll_timer,
+              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                    (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
 }
@@ -481,11 +481,11 @@ static void cuda_receive_packet(CUDAState *s,
         if (autopoll != s->autopoll) {
             s->autopoll = autopoll;
             if (autopoll) {
-                qemu_mod_timer(s->adb_poll_timer,
-                               qemu_get_clock_ns(vm_clock) +
+                timer_mod(s->adb_poll_timer,
+                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                                (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
             } else {
-                qemu_del_timer(s->adb_poll_timer);
+                timer_del(s->adb_poll_timer);
             }
         }
         obuf[0] = CUDA_PACKET;
@@ -494,14 +494,14 @@ static void cuda_receive_packet(CUDAState *s,
         break;
     case CUDA_SET_TIME:
         ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
-        s->tick_offset = ti - (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
+        s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
         obuf[0] = CUDA_PACKET;
         obuf[1] = 0;
         obuf[2] = 0;
         cuda_send_packet_to_host(s, obuf, 3);
         break;
     case CUDA_GET_TIME:
-        ti = s->tick_offset + (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
+        ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
         obuf[0] = CUDA_PACKET;
         obuf[1] = 0;
         obuf[2] = 0;
@@ -689,12 +689,12 @@ static void cuda_realizefn(DeviceState *dev, Error **errp)
     CUDAState *s = CUDA(dev);
     struct tm tm;
 
-    s->timers[0].timer = qemu_new_timer_ns(vm_clock, cuda_timer1, s);
+    s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s);
 
     qemu_get_timedate(&tm, 0);
     s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
 
-    s->adb_poll_timer = qemu_new_timer_ns(vm_clock, cuda_adb_poll, s);
+    s->adb_poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_adb_poll, s);
 }
 
 static void cuda_initfn(Object *obj)
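
The CUDA_SET_TIME/CUDA_GET_TIME hunks above divide the nanosecond clock by get_ticks_per_sec() (10^9) to obtain whole seconds of guest uptime, so tick_offset stores the guest's wall-clock seconds minus current uptime and the two commands round-trip. A condensed sketch with hypothetical variable names (guest_secs, now, reported are not in the patch):

    /* Hypothetical names, mirroring the SET/GET pairing above. */
    uint32_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec();
    s->tick_offset = guest_secs - now;           /* CUDA_SET_TIME */
    uint32_t reported = s->tick_offset + now;    /* CUDA_GET_TIME -> guest_secs */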


@@ -245,10 +245,10 @@ static uint64_t timer_read(void *opaque, hwaddr addr, unsigned size)
     switch (addr) {
     case 0x38:
-        value = qemu_get_clock_ns(vm_clock);
+        value = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
         break;
     case 0x3c:
-        value = qemu_get_clock_ns(vm_clock) >> 32;
+        value = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 32;
         break;
     }
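
The two registers above expose a single 64-bit nanosecond counter as a low word at 0x38 and a high word at 0x3c. Each access samples the clock afresh, so a guest reading the halves back to back can observe a torn value when the low word wraps; the usual guest-side pattern is to re-read the high word, sketched here with a hypothetical mmio_read32() helper (not part of this patch):

    /* Hypothetical guest-side read; mmio_read32() is assumed. */
    uint64_t read_ns_counter(void)
    {
        uint32_t hi, lo;
        do {
            hi = mmio_read32(0x3c);          /* high word */
            lo = mmio_read32(0x38);          /* low word */
        } while (hi != mmio_read32(0x3c));   /* retry if high word moved */
        return ((uint64_t)hi << 32) | lo;
    }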


@@ -276,8 +276,8 @@ static void vfio_intx_mmap_enable(void *opaque)
     VFIODevice *vdev = opaque;
 
     if (vdev->intx.pending) {
-        qemu_mod_timer(vdev->intx.mmap_timer,
-                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
+        timer_mod(vdev->intx.mmap_timer,
+                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
         return;
     }
@@ -300,8 +300,8 @@ static void vfio_intx_interrupt(void *opaque)
         qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
         vfio_mmap_set_enabled(vdev, false);
         if (vdev->intx.mmap_timeout) {
-            qemu_mod_timer(vdev->intx.mmap_timer,
-                           qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
+            timer_mod(vdev->intx.mmap_timer,
+                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
         }
     }
@@ -543,7 +543,7 @@ static void vfio_disable_intx(VFIODevice *vdev)
 {
     int fd;
 
-    qemu_del_timer(vdev->intx.mmap_timer);
+    timer_del(vdev->intx.mmap_timer);
     vfio_disable_intx_kvm(vdev);
     vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
     vdev->intx.pending = false;
@@ -3176,7 +3176,7 @@ static int vfio_initfn(PCIDevice *pdev)
     }
 
     if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
-        vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
+        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                              vfio_intx_mmap_enable, vdev);
         pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
         ret = vfio_enable_intx(vdev);
@@ -3210,7 +3210,7 @@ static void vfio_exitfn(PCIDevice *pdev)
     pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
     vfio_disable_interrupts(vdev);
     if (vdev->intx.mmap_timer) {
-        qemu_free_timer(vdev->intx.mmap_timer);
+        timer_free(vdev->intx.mmap_timer);
     }
     vfio_teardown_msi(vdev);
     vfio_unmap_bars(vdev);
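
Taken together, the vfio hunks show the whole timer lifecycle under the new names: timer_new_ms() creates a millisecond-granularity timer, timer_mod() arms it against qemu_clock_get_ms(), timer_del() cancels a pending expiry (as the vfio teardown path does before the free), and timer_free() releases it. A condensed sketch; the helper name and the 100 ms deadline are illustrative, not from the patch:

    /* Condensed lifecycle sketch under the new API. */
    static void mmap_timer_lifecycle(VFIODevice *vdev)
    {
        QEMUTimer *t = timer_new_ms(QEMU_CLOCK_VIRTUAL, vfio_intx_mmap_enable, vdev);
        timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 100); /* now + 100 ms */
        timer_del(t);    /* stop a pending timer */
        timer_free(t);   /* then release it */
    }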