mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-07 17:53:56 -06:00
Merge remote-tracking branch 'origin/master' into threadpool
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
commit
f563a5d7a8
155 changed files with 6742 additions and 2004 deletions
109
hw/ac97.c
109
hw/ac97.c
|
@ -1226,32 +1226,101 @@ static const VMStateDescription vmstate_ac97 = {
|
|||
}
|
||||
};
|
||||
|
||||
static const MemoryRegionPortio nam_portio[] = {
|
||||
{ 0, 256 * 1, 1, .read = nam_readb, },
|
||||
{ 0, 256 * 2, 2, .read = nam_readw, },
|
||||
{ 0, 256 * 4, 4, .read = nam_readl, },
|
||||
{ 0, 256 * 1, 1, .write = nam_writeb, },
|
||||
{ 0, 256 * 2, 2, .write = nam_writew, },
|
||||
{ 0, 256 * 4, 4, .write = nam_writel, },
|
||||
PORTIO_END_OF_LIST (),
|
||||
};
|
||||
static uint64_t nam_read(void *opaque, hwaddr addr, unsigned size)
|
||||
{
|
||||
if ((addr / size) > 256) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
return nam_readb(opaque, addr);
|
||||
case 2:
|
||||
return nam_readw(opaque, addr);
|
||||
case 4:
|
||||
return nam_readl(opaque, addr);
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
static void nam_write(void *opaque, hwaddr addr, uint64_t val,
|
||||
unsigned size)
|
||||
{
|
||||
if ((addr / size) > 256) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
nam_writeb(opaque, addr, val);
|
||||
break;
|
||||
case 2:
|
||||
nam_writew(opaque, addr, val);
|
||||
break;
|
||||
case 4:
|
||||
nam_writel(opaque, addr, val);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static const MemoryRegionOps ac97_io_nam_ops = {
|
||||
.old_portio = nam_portio,
|
||||
.read = nam_read,
|
||||
.write = nam_write,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 4,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
static const MemoryRegionPortio nabm_portio[] = {
|
||||
{ 0, 64 * 1, 1, .read = nabm_readb, },
|
||||
{ 0, 64 * 2, 2, .read = nabm_readw, },
|
||||
{ 0, 64 * 4, 4, .read = nabm_readl, },
|
||||
{ 0, 64 * 1, 1, .write = nabm_writeb, },
|
||||
{ 0, 64 * 2, 2, .write = nabm_writew, },
|
||||
{ 0, 64 * 4, 4, .write = nabm_writel, },
|
||||
PORTIO_END_OF_LIST ()
|
||||
};
|
||||
static uint64_t nabm_read(void *opaque, hwaddr addr, unsigned size)
|
||||
{
|
||||
if ((addr / size) > 64) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
return nabm_readb(opaque, addr);
|
||||
case 2:
|
||||
return nabm_readw(opaque, addr);
|
||||
case 4:
|
||||
return nabm_readl(opaque, addr);
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
static void nabm_write(void *opaque, hwaddr addr, uint64_t val,
|
||||
unsigned size)
|
||||
{
|
||||
if ((addr / size) > 64) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
nabm_writeb(opaque, addr, val);
|
||||
break;
|
||||
case 2:
|
||||
nabm_writew(opaque, addr, val);
|
||||
break;
|
||||
case 4:
|
||||
nabm_writel(opaque, addr, val);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static const MemoryRegionOps ac97_io_nabm_ops = {
|
||||
.old_portio = nabm_portio,
|
||||
.read = nabm_read,
|
||||
.write = nabm_write,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 4,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
static void ac97_on_reset (void *opaque)
|
||||
|
|
46
hw/es1370.c
46
hw/es1370.c
|
@ -908,18 +908,44 @@ static void es1370_adc_callback (void *opaque, int avail)
|
|||
es1370_run_channel (s, ADC_CHANNEL, avail);
|
||||
}
|
||||
|
||||
static const MemoryRegionPortio es1370_portio[] = {
|
||||
{ 0, 0x40 * 4, 1, .write = es1370_writeb, },
|
||||
{ 0, 0x40 * 2, 2, .write = es1370_writew, },
|
||||
{ 0, 0x40, 4, .write = es1370_writel, },
|
||||
{ 0, 0x40 * 4, 1, .read = es1370_readb, },
|
||||
{ 0, 0x40 * 2, 2, .read = es1370_readw, },
|
||||
{ 0, 0x40, 4, .read = es1370_readl, },
|
||||
PORTIO_END_OF_LIST ()
|
||||
};
|
||||
static uint64_t es1370_read(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
return es1370_readb(opaque, addr);
|
||||
case 2:
|
||||
return es1370_readw(opaque, addr);
|
||||
case 4:
|
||||
return es1370_readl(opaque, addr);
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
static void es1370_write(void *opaque, hwaddr addr, uint64_t val,
|
||||
unsigned size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
es1370_writeb(opaque, addr, val);
|
||||
break;
|
||||
case 2:
|
||||
es1370_writew(opaque, addr, val);
|
||||
break;
|
||||
case 4:
|
||||
es1370_writel(opaque, addr, val);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static const MemoryRegionOps es1370_io_ops = {
|
||||
.old_portio = es1370_portio,
|
||||
.read = es1370_read,
|
||||
.write = es1370_write,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 4,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
|
|
20
hw/i8254.c
20
hw/i8254.c
|
@ -111,7 +111,8 @@ static void pit_latch_count(PITChannelState *s)
|
|||
}
|
||||
}
|
||||
|
||||
static void pit_ioport_write(void *opaque, uint32_t addr, uint32_t val)
|
||||
static void pit_ioport_write(void *opaque, hwaddr addr,
|
||||
uint64_t val, unsigned size)
|
||||
{
|
||||
PITCommonState *pit = opaque;
|
||||
int channel, access;
|
||||
|
@ -178,7 +179,8 @@ static void pit_ioport_write(void *opaque, uint32_t addr, uint32_t val)
|
|||
}
|
||||
}
|
||||
|
||||
static uint32_t pit_ioport_read(void *opaque, uint32_t addr)
|
||||
static uint64_t pit_ioport_read(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
{
|
||||
PITCommonState *pit = opaque;
|
||||
int ret, count;
|
||||
|
@ -290,14 +292,14 @@ static void pit_irq_control(void *opaque, int n, int enable)
|
|||
}
|
||||
}
|
||||
|
||||
static const MemoryRegionPortio pit_portio[] = {
|
||||
{ 0, 4, 1, .write = pit_ioport_write },
|
||||
{ 0, 3, 1, .read = pit_ioport_read },
|
||||
PORTIO_END_OF_LIST()
|
||||
};
|
||||
|
||||
static const MemoryRegionOps pit_ioport_ops = {
|
||||
.old_portio = pit_portio
|
||||
.read = pit_ioport_read,
|
||||
.write = pit_ioport_write,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 1,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
static void pit_post_load(PITCommonState *s)
|
||||
|
|
|
@ -882,8 +882,7 @@ static int assign_intx(AssignedDevice *dev)
|
|||
intx_route = pci_device_route_intx_to_irq(&dev->dev, dev->intpin);
|
||||
assert(intx_route.mode != PCI_INTX_INVERTED);
|
||||
|
||||
if (dev->intx_route.mode == intx_route.mode &&
|
||||
dev->intx_route.irq == intx_route.irq) {
|
||||
if (!pci_intx_route_changed(&dev->intx_route, &intx_route)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -997,12 +996,9 @@ static void assigned_dev_update_msi(PCIDevice *pci_dev)
|
|||
}
|
||||
|
||||
if (ctrl_byte & PCI_MSI_FLAGS_ENABLE) {
|
||||
uint8_t *pos = pci_dev->config + pci_dev->msi_cap;
|
||||
MSIMessage msg;
|
||||
MSIMessage msg = msi_get_message(pci_dev, 0);
|
||||
int virq;
|
||||
|
||||
msg.address = pci_get_long(pos + PCI_MSI_ADDRESS_LO);
|
||||
msg.data = pci_get_word(pos + PCI_MSI_DATA_32);
|
||||
virq = kvm_irqchip_add_msi_route(kvm_state, msg);
|
||||
if (virq < 0) {
|
||||
perror("assigned_dev_update_msi: kvm_irqchip_add_msi_route");
|
||||
|
|
61
hw/m25p80.c
61
hw/m25p80.c
|
@ -72,6 +72,10 @@ typedef struct FlashPartInfo {
|
|||
.page_size = 256,\
|
||||
.flags = (_flags),\
|
||||
|
||||
#define JEDEC_NUMONYX 0x20
|
||||
#define JEDEC_WINBOND 0xEF
|
||||
#define JEDEC_SPANSION 0x01
|
||||
|
||||
static const FlashPartInfo known_devices[] = {
|
||||
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
|
||||
{ INFO("at25fs010", 0x1f6601, 0, 32 << 10, 4, ER_4K) },
|
||||
|
@ -180,17 +184,26 @@ static const FlashPartInfo known_devices[] = {
|
|||
|
||||
typedef enum {
|
||||
NOP = 0,
|
||||
PP = 0x2,
|
||||
READ = 0x3,
|
||||
WRDI = 0x4,
|
||||
RDSR = 0x5,
|
||||
WREN = 0x6,
|
||||
JEDEC_READ = 0x9f,
|
||||
BULK_ERASE = 0xc7,
|
||||
|
||||
READ = 0x3,
|
||||
FAST_READ = 0xb,
|
||||
DOR = 0x3b,
|
||||
QOR = 0x6b,
|
||||
DIOR = 0xbb,
|
||||
QIOR = 0xeb,
|
||||
|
||||
PP = 0x2,
|
||||
DPP = 0xa2,
|
||||
QPP = 0x32,
|
||||
|
||||
ERASE_4K = 0x20,
|
||||
ERASE_32K = 0x52,
|
||||
ERASE_SECTOR = 0xd8,
|
||||
JEDEC_READ = 0x9f,
|
||||
BULK_ERASE = 0xc7,
|
||||
} FlashCMD;
|
||||
|
||||
typedef enum {
|
||||
|
@ -346,11 +359,17 @@ static void complete_collecting_data(Flash *s)
|
|||
s->cur_addr |= s->data[2];
|
||||
|
||||
switch (s->cmd_in_progress) {
|
||||
case DPP:
|
||||
case QPP:
|
||||
case PP:
|
||||
s->state = STATE_PAGE_PROGRAM;
|
||||
break;
|
||||
case READ:
|
||||
case FAST_READ:
|
||||
case DOR:
|
||||
case QOR:
|
||||
case DIOR:
|
||||
case QIOR:
|
||||
s->state = STATE_READ;
|
||||
break;
|
||||
case ERASE_4K:
|
||||
|
@ -374,6 +393,8 @@ static void decode_new_cmd(Flash *s, uint32_t value)
|
|||
case ERASE_32K:
|
||||
case ERASE_SECTOR:
|
||||
case READ:
|
||||
case DPP:
|
||||
case QPP:
|
||||
case PP:
|
||||
s->needed_bytes = 3;
|
||||
s->pos = 0;
|
||||
|
@ -382,12 +403,44 @@ static void decode_new_cmd(Flash *s, uint32_t value)
|
|||
break;
|
||||
|
||||
case FAST_READ:
|
||||
case DOR:
|
||||
case QOR:
|
||||
s->needed_bytes = 4;
|
||||
s->pos = 0;
|
||||
s->len = 0;
|
||||
s->state = STATE_COLLECTING_DATA;
|
||||
break;
|
||||
|
||||
case DIOR:
|
||||
switch ((s->pi->jedec >> 16) & 0xFF) {
|
||||
case JEDEC_WINBOND:
|
||||
case JEDEC_SPANSION:
|
||||
s->needed_bytes = 4;
|
||||
break;
|
||||
case JEDEC_NUMONYX:
|
||||
default:
|
||||
s->needed_bytes = 5;
|
||||
}
|
||||
s->pos = 0;
|
||||
s->len = 0;
|
||||
s->state = STATE_COLLECTING_DATA;
|
||||
break;
|
||||
|
||||
case QIOR:
|
||||
switch ((s->pi->jedec >> 16) & 0xFF) {
|
||||
case JEDEC_WINBOND:
|
||||
case JEDEC_SPANSION:
|
||||
s->needed_bytes = 6;
|
||||
break;
|
||||
case JEDEC_NUMONYX:
|
||||
default:
|
||||
s->needed_bytes = 8;
|
||||
}
|
||||
s->pos = 0;
|
||||
s->len = 0;
|
||||
s->state = STATE_COLLECTING_DATA;
|
||||
break;
|
||||
|
||||
case WRDI:
|
||||
s->write_enable = false;
|
||||
break;
|
||||
|
|
24
hw/m48t59.c
24
hw/m48t59.c
|
@ -27,6 +27,7 @@
|
|||
#include "sysemu.h"
|
||||
#include "sysbus.h"
|
||||
#include "isa.h"
|
||||
#include "exec-memory.h"
|
||||
|
||||
//#define DEBUG_NVRAM
|
||||
|
||||
|
@ -80,6 +81,7 @@ typedef struct M48t59ISAState {
|
|||
typedef struct M48t59SysBusState {
|
||||
SysBusDevice busdev;
|
||||
M48t59State state;
|
||||
MemoryRegion io;
|
||||
} M48t59SysBusState;
|
||||
|
||||
/* Fake timer functions */
|
||||
|
@ -481,7 +483,8 @@ void m48t59_toggle_lock (void *opaque, int lock)
|
|||
}
|
||||
|
||||
/* IO access to NVRAM */
|
||||
static void NVRAM_writeb (void *opaque, uint32_t addr, uint32_t val)
|
||||
static void NVRAM_writeb(void *opaque, hwaddr addr, uint64_t val,
|
||||
unsigned size)
|
||||
{
|
||||
M48t59State *NVRAM = opaque;
|
||||
|
||||
|
@ -504,7 +507,7 @@ static void NVRAM_writeb (void *opaque, uint32_t addr, uint32_t val)
|
|||
}
|
||||
}
|
||||
|
||||
static uint32_t NVRAM_readb (void *opaque, uint32_t addr)
|
||||
static uint64_t NVRAM_readb(void *opaque, hwaddr addr, unsigned size)
|
||||
{
|
||||
M48t59State *NVRAM = opaque;
|
||||
uint32_t retval;
|
||||
|
@ -626,13 +629,14 @@ static void m48t59_reset_sysbus(DeviceState *d)
|
|||
m48t59_reset_common(NVRAM);
|
||||
}
|
||||
|
||||
static const MemoryRegionPortio m48t59_portio[] = {
|
||||
{0, 4, 1, .read = NVRAM_readb, .write = NVRAM_writeb },
|
||||
PORTIO_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static const MemoryRegionOps m48t59_io_ops = {
|
||||
.old_portio = m48t59_portio,
|
||||
.read = NVRAM_readb,
|
||||
.write = NVRAM_writeb,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 1,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
/* Initialisation routine */
|
||||
|
@ -653,9 +657,9 @@ M48t59State *m48t59_init(qemu_irq IRQ, hwaddr mem_base,
|
|||
d = FROM_SYSBUS(M48t59SysBusState, s);
|
||||
state = &d->state;
|
||||
sysbus_connect_irq(s, 0, IRQ);
|
||||
memory_region_init_io(&d->io, &m48t59_io_ops, state, "m48t59", 4);
|
||||
if (io_base != 0) {
|
||||
register_ioport_read(io_base, 0x04, 1, NVRAM_readb, state);
|
||||
register_ioport_write(io_base, 0x04, 1, NVRAM_writeb, state);
|
||||
memory_region_add_subregion(get_system_io(), io_base, &d->io);
|
||||
}
|
||||
if (mem_base != 0) {
|
||||
sysbus_mmio_map(s, 0, mem_base);
|
||||
|
|
|
@ -383,7 +383,8 @@ static void rtc_update_timer(void *opaque)
|
|||
check_update_timer(s);
|
||||
}
|
||||
|
||||
static void cmos_ioport_write(void *opaque, uint32_t addr, uint32_t data)
|
||||
static void cmos_ioport_write(void *opaque, hwaddr addr,
|
||||
uint64_t data, unsigned size)
|
||||
{
|
||||
RTCState *s = opaque;
|
||||
|
||||
|
@ -595,7 +596,8 @@ static int update_in_progress(RTCState *s)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static uint32_t cmos_ioport_read(void *opaque, uint32_t addr)
|
||||
static uint64_t cmos_ioport_read(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
{
|
||||
RTCState *s = opaque;
|
||||
int ret;
|
||||
|
@ -769,13 +771,14 @@ static void rtc_reset(void *opaque)
|
|||
#endif
|
||||
}
|
||||
|
||||
static const MemoryRegionPortio cmos_portio[] = {
|
||||
{0, 2, 1, .read = cmos_ioport_read, .write = cmos_ioport_write },
|
||||
PORTIO_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static const MemoryRegionOps cmos_ops = {
|
||||
.old_portio = cmos_portio
|
||||
.read = cmos_ioport_read,
|
||||
.write = cmos_ioport_write,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 1,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
static void rtc_get_date(Object *obj, Visitor *v, void *opaque,
|
||||
|
|
45
hw/msi.c
45
hw/msi.c
|
@ -122,6 +122,31 @@ void msi_set_message(PCIDevice *dev, MSIMessage msg)
|
|||
pci_set_word(dev->config + msi_data_off(dev, msi64bit), msg.data);
|
||||
}
|
||||
|
||||
MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector)
|
||||
{
|
||||
uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev));
|
||||
bool msi64bit = flags & PCI_MSI_FLAGS_64BIT;
|
||||
unsigned int nr_vectors = msi_nr_vectors(flags);
|
||||
MSIMessage msg;
|
||||
|
||||
assert(vector < nr_vectors);
|
||||
|
||||
if (msi64bit) {
|
||||
msg.address = pci_get_quad(dev->config + msi_address_lo_off(dev));
|
||||
} else {
|
||||
msg.address = pci_get_long(dev->config + msi_address_lo_off(dev));
|
||||
}
|
||||
|
||||
/* upper bit 31:16 is zero */
|
||||
msg.data = pci_get_word(dev->config + msi_data_off(dev, msi64bit));
|
||||
if (nr_vectors > 1) {
|
||||
msg.data &= ~(nr_vectors - 1);
|
||||
msg.data |= vector;
|
||||
}
|
||||
|
||||
return msg;
|
||||
}
|
||||
|
||||
bool msi_enabled(const PCIDevice *dev)
|
||||
{
|
||||
return msi_present(dev) &&
|
||||
|
@ -249,8 +274,7 @@ void msi_notify(PCIDevice *dev, unsigned int vector)
|
|||
uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev));
|
||||
bool msi64bit = flags & PCI_MSI_FLAGS_64BIT;
|
||||
unsigned int nr_vectors = msi_nr_vectors(flags);
|
||||
uint64_t address;
|
||||
uint32_t data;
|
||||
MSIMessage msg;
|
||||
|
||||
assert(vector < nr_vectors);
|
||||
if (msi_is_masked(dev, vector)) {
|
||||
|
@ -261,24 +285,13 @@ void msi_notify(PCIDevice *dev, unsigned int vector)
|
|||
return;
|
||||
}
|
||||
|
||||
if (msi64bit) {
|
||||
address = pci_get_quad(dev->config + msi_address_lo_off(dev));
|
||||
} else {
|
||||
address = pci_get_long(dev->config + msi_address_lo_off(dev));
|
||||
}
|
||||
|
||||
/* upper bit 31:16 is zero */
|
||||
data = pci_get_word(dev->config + msi_data_off(dev, msi64bit));
|
||||
if (nr_vectors > 1) {
|
||||
data &= ~(nr_vectors - 1);
|
||||
data |= vector;
|
||||
}
|
||||
msg = msi_get_message(dev, vector);
|
||||
|
||||
MSI_DEV_PRINTF(dev,
|
||||
"notify vector 0x%x"
|
||||
" address: 0x%"PRIx64" data: 0x%"PRIx32"\n",
|
||||
vector, address, data);
|
||||
stl_le_phys(address, data);
|
||||
vector, msg.address, msg.data);
|
||||
stl_le_phys(msg.address, msg.data);
|
||||
}
|
||||
|
||||
/* Normally called by pci_default_write_config(). */
|
||||
|
|
1
hw/msi.h
1
hw/msi.h
|
@ -32,6 +32,7 @@ struct MSIMessage {
|
|||
extern bool msi_supported;
|
||||
|
||||
void msi_set_message(PCIDevice *dev, MSIMessage msg);
|
||||
MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector);
|
||||
bool msi_enabled(const PCIDevice *dev);
|
||||
int msi_init(struct PCIDevice *dev, uint8_t offset,
|
||||
unsigned int nr_vectors, bool msi64bit, bool msi_per_vector_mask);
|
||||
|
|
|
@ -1334,8 +1334,9 @@ static void n8x0_init(ram_addr_t ram_size, const char *boot_device,
|
|||
n8x0_dss_setup(s);
|
||||
n8x0_cbus_setup(s);
|
||||
n8x0_uart_setup(s);
|
||||
if (usb_enabled)
|
||||
if (usb_enabled(false)) {
|
||||
n8x0_usb_setup(s);
|
||||
}
|
||||
|
||||
if (kernel_filename) {
|
||||
/* Or at the linux loader. */
|
||||
|
|
19
hw/pc.c
19
hw/pc.c
|
@ -421,7 +421,8 @@ typedef struct Port92State {
|
|||
qemu_irq *a20_out;
|
||||
} Port92State;
|
||||
|
||||
static void port92_write(void *opaque, uint32_t addr, uint32_t val)
|
||||
static void port92_write(void *opaque, hwaddr addr, uint64_t val,
|
||||
unsigned size)
|
||||
{
|
||||
Port92State *s = opaque;
|
||||
|
||||
|
@ -433,7 +434,8 @@ static void port92_write(void *opaque, uint32_t addr, uint32_t val)
|
|||
}
|
||||
}
|
||||
|
||||
static uint32_t port92_read(void *opaque, uint32_t addr)
|
||||
static uint64_t port92_read(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
{
|
||||
Port92State *s = opaque;
|
||||
uint32_t ret;
|
||||
|
@ -468,13 +470,14 @@ static void port92_reset(DeviceState *d)
|
|||
s->outport &= ~1;
|
||||
}
|
||||
|
||||
static const MemoryRegionPortio port92_portio[] = {
|
||||
{ 0, 1, 1, .read = port92_read, .write = port92_write },
|
||||
PORTIO_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static const MemoryRegionOps port92_ops = {
|
||||
.old_portio = port92_portio
|
||||
.read = port92_read,
|
||||
.write = port92_write,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 1,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
static int port92_initfn(ISADevice *dev)
|
||||
|
|
11
hw/pc_piix.c
11
hw/pc_piix.c
|
@ -43,6 +43,7 @@
|
|||
#include "xen.h"
|
||||
#include "memory.h"
|
||||
#include "exec-memory.h"
|
||||
#include "cpu.h"
|
||||
#ifdef CONFIG_XEN
|
||||
# include <xen/hvm/hvm_info_table.h>
|
||||
#endif
|
||||
|
@ -267,7 +268,7 @@ static void pc_init1(MemoryRegion *system_memory,
|
|||
pc_cmos_init(below_4g_mem_size, above_4g_mem_size, boot_device,
|
||||
floppy, idebus[0], idebus[1], rtc_state);
|
||||
|
||||
if (pci_enabled && usb_enabled) {
|
||||
if (pci_enabled && usb_enabled(false)) {
|
||||
pci_create_simple(pci_bus, piix3_devfn + 2, "piix3-usb-uhci");
|
||||
}
|
||||
|
||||
|
@ -302,6 +303,12 @@ static void pc_init_pci(QEMUMachineInitArgs *args)
|
|||
initrd_filename, cpu_model, 1, 1);
|
||||
}
|
||||
|
||||
static void pc_init_pci_1_3(QEMUMachineInitArgs *args)
|
||||
{
|
||||
enable_kvm_pv_eoi();
|
||||
pc_init_pci(args);
|
||||
}
|
||||
|
||||
static void pc_init_pci_no_kvmclock(QEMUMachineInitArgs *args)
|
||||
{
|
||||
ram_addr_t ram_size = args->ram_size;
|
||||
|
@ -349,7 +356,7 @@ static QEMUMachine pc_machine_v1_3 = {
|
|||
.name = "pc-1.3",
|
||||
.alias = "pc",
|
||||
.desc = "Standard PC",
|
||||
.init = pc_init_pci,
|
||||
.init = pc_init_pci_1_3,
|
||||
.max_cpus = 255,
|
||||
.is_default = 1,
|
||||
};
|
||||
|
|
42
hw/pci.c
42
hw/pci.c
|
@ -1117,10 +1117,21 @@ PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
|
|||
pin = bus->map_irq(dev, pin);
|
||||
dev = bus->parent_dev;
|
||||
} while (dev);
|
||||
assert(bus->route_intx_to_irq);
|
||||
|
||||
if (!bus->route_intx_to_irq) {
|
||||
error_report("PCI: Bug - unimplemented PCI INTx routing (%s)\n",
|
||||
object_get_typename(OBJECT(bus->qbus.parent)));
|
||||
return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
|
||||
}
|
||||
|
||||
return bus->route_intx_to_irq(bus->irq_opaque, pin);
|
||||
}
|
||||
|
||||
bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
|
||||
{
|
||||
return old->mode != new->mode || old->irq != new->irq;
|
||||
}
|
||||
|
||||
void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
|
||||
{
|
||||
PCIDevice *dev;
|
||||
|
@ -1144,6 +1155,24 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev,
|
|||
dev->intx_routing_notifier = notifier;
|
||||
}
|
||||
|
||||
/*
|
||||
* PCI-to-PCI bridge specification
|
||||
* 9.1: Interrupt routing. Table 9-1
|
||||
*
|
||||
* the PCI Express Base Specification, Revision 2.1
|
||||
* 2.2.8.1: INTx interrutp signaling - Rules
|
||||
* the Implementation Note
|
||||
* Table 2-20
|
||||
*/
|
||||
/*
|
||||
* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
|
||||
* 0-origin unlike PCI interrupt pin register.
|
||||
*/
|
||||
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
|
||||
{
|
||||
return (pin + PCI_SLOT(pci_dev->devfn)) % PCI_NUM_PINS;
|
||||
}
|
||||
|
||||
/***********************************************************/
|
||||
/* monitor info on PCI */
|
||||
|
||||
|
@ -1208,6 +1237,7 @@ static const pci_class_desc pci_class_descriptions[] =
|
|||
{ 0x0c02, "SSA controller", "ssa"},
|
||||
{ 0x0c03, "USB controller", "usb"},
|
||||
{ 0x0c04, "Fibre channel controller", "fibre-channel"},
|
||||
{ 0x0c05, "SMBus"},
|
||||
{ 0, NULL}
|
||||
};
|
||||
|
||||
|
@ -1667,16 +1697,16 @@ PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
|
|||
return pci_create_simple_multifunction(bus, devfn, false, name);
|
||||
}
|
||||
|
||||
static int pci_find_space(PCIDevice *pdev, uint8_t size)
|
||||
static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
|
||||
{
|
||||
int config_size = pci_config_size(pdev);
|
||||
int offset = PCI_CONFIG_HEADER_SIZE;
|
||||
int i;
|
||||
for (i = PCI_CONFIG_HEADER_SIZE; i < config_size; ++i)
|
||||
for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
|
||||
if (pdev->used[i])
|
||||
offset = i + 1;
|
||||
else if (i - offset + 1 == size)
|
||||
return offset;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1895,7 +1925,7 @@ int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
|
|||
config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
|
||||
pdev->config[PCI_CAPABILITY_LIST] = offset;
|
||||
pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
|
||||
memset(pdev->used + offset, 0xFF, size);
|
||||
memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
|
||||
/* Make capability read-only by default */
|
||||
memset(pdev->wmask + offset, 0, size);
|
||||
/* Check capability by default */
|
||||
|
@ -1915,7 +1945,7 @@ void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
|
|||
memset(pdev->w1cmask + offset, 0, size);
|
||||
/* Clear cmask as device-specific registers can't be checked */
|
||||
memset(pdev->cmask + offset, 0, size);
|
||||
memset(pdev->used + offset, 0, size);
|
||||
memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));
|
||||
|
||||
if (!pdev->config[PCI_CAPABILITY_LIST])
|
||||
pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
|
||||
|
|
3
hw/pci.h
3
hw/pci.h
|
@ -318,6 +318,8 @@ void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
|
|||
void *irq_opaque, int nirq);
|
||||
int pci_bus_get_irq_level(PCIBus *bus, int irq_num);
|
||||
void pci_bus_hotplug(PCIBus *bus, pci_hotplug_fn hotplug, DeviceState *dev);
|
||||
/* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */
|
||||
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin);
|
||||
PCIBus *pci_register_bus(DeviceState *parent, const char *name,
|
||||
pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
|
||||
void *irq_opaque,
|
||||
|
@ -326,6 +328,7 @@ PCIBus *pci_register_bus(DeviceState *parent, const char *name,
|
|||
uint8_t devfn_min, int nirq);
|
||||
void pci_bus_set_route_irq_fn(PCIBus *, pci_route_irq_fn);
|
||||
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin);
|
||||
bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new);
|
||||
void pci_bus_fire_intx_routing_notifier(PCIBus *bus);
|
||||
void pci_device_set_intx_routing_notifier(PCIDevice *dev,
|
||||
PCIINTxRoutingNotifier notifier);
|
||||
|
|
|
@ -151,58 +151,63 @@ static void pci_bridge_init_alias(PCIBridge *bridge, MemoryRegion *alias,
|
|||
memory_region_add_subregion_overlap(parent_space, base, alias, 1);
|
||||
}
|
||||
|
||||
static void pci_bridge_cleanup_alias(MemoryRegion *alias,
|
||||
MemoryRegion *parent_space)
|
||||
{
|
||||
memory_region_del_subregion(parent_space, alias);
|
||||
memory_region_destroy(alias);
|
||||
}
|
||||
|
||||
static void pci_bridge_region_init(PCIBridge *br)
|
||||
static PCIBridgeWindows *pci_bridge_region_init(PCIBridge *br)
|
||||
{
|
||||
PCIBus *parent = br->dev.bus;
|
||||
PCIBridgeWindows *w = g_new(PCIBridgeWindows, 1);
|
||||
uint16_t cmd = pci_get_word(br->dev.config + PCI_COMMAND);
|
||||
|
||||
pci_bridge_init_alias(br, &br->alias_pref_mem,
|
||||
pci_bridge_init_alias(br, &w->alias_pref_mem,
|
||||
PCI_BASE_ADDRESS_MEM_PREFETCH,
|
||||
"pci_bridge_pref_mem",
|
||||
&br->address_space_mem,
|
||||
parent->address_space_mem,
|
||||
cmd & PCI_COMMAND_MEMORY);
|
||||
pci_bridge_init_alias(br, &br->alias_mem,
|
||||
pci_bridge_init_alias(br, &w->alias_mem,
|
||||
PCI_BASE_ADDRESS_SPACE_MEMORY,
|
||||
"pci_bridge_mem",
|
||||
&br->address_space_mem,
|
||||
parent->address_space_mem,
|
||||
cmd & PCI_COMMAND_MEMORY);
|
||||
pci_bridge_init_alias(br, &br->alias_io,
|
||||
pci_bridge_init_alias(br, &w->alias_io,
|
||||
PCI_BASE_ADDRESS_SPACE_IO,
|
||||
"pci_bridge_io",
|
||||
&br->address_space_io,
|
||||
parent->address_space_io,
|
||||
cmd & PCI_COMMAND_IO);
|
||||
/* TODO: optinal VGA and VGA palette snooping support. */
|
||||
|
||||
return w;
|
||||
}
|
||||
|
||||
static void pci_bridge_region_cleanup(PCIBridge *br)
|
||||
static void pci_bridge_region_del(PCIBridge *br, PCIBridgeWindows *w)
|
||||
{
|
||||
PCIBus *parent = br->dev.bus;
|
||||
pci_bridge_cleanup_alias(&br->alias_io,
|
||||
parent->address_space_io);
|
||||
pci_bridge_cleanup_alias(&br->alias_mem,
|
||||
parent->address_space_mem);
|
||||
pci_bridge_cleanup_alias(&br->alias_pref_mem,
|
||||
parent->address_space_mem);
|
||||
|
||||
memory_region_del_subregion(parent->address_space_io, &w->alias_io);
|
||||
memory_region_del_subregion(parent->address_space_mem, &w->alias_mem);
|
||||
memory_region_del_subregion(parent->address_space_mem, &w->alias_pref_mem);
|
||||
}
|
||||
|
||||
static void pci_bridge_region_cleanup(PCIBridge *br, PCIBridgeWindows *w)
|
||||
{
|
||||
memory_region_destroy(&w->alias_io);
|
||||
memory_region_destroy(&w->alias_mem);
|
||||
memory_region_destroy(&w->alias_pref_mem);
|
||||
g_free(w);
|
||||
}
|
||||
|
||||
static void pci_bridge_update_mappings(PCIBridge *br)
|
||||
{
|
||||
PCIBridgeWindows *w = br->windows;
|
||||
|
||||
/* Make updates atomic to: handle the case of one VCPU updating the bridge
|
||||
* while another accesses an unaffected region. */
|
||||
memory_region_transaction_begin();
|
||||
pci_bridge_region_cleanup(br);
|
||||
pci_bridge_region_init(br);
|
||||
pci_bridge_region_del(br, br->windows);
|
||||
br->windows = pci_bridge_region_init(br);
|
||||
memory_region_transaction_commit();
|
||||
pci_bridge_region_cleanup(br, w);
|
||||
}
|
||||
|
||||
/* default write_config function for PCI-to-PCI bridge */
|
||||
|
@ -326,7 +331,7 @@ int pci_bridge_initfn(PCIDevice *dev)
|
|||
memory_region_init(&br->address_space_mem, "pci_bridge_pci", INT64_MAX);
|
||||
sec_bus->address_space_io = &br->address_space_io;
|
||||
memory_region_init(&br->address_space_io, "pci_bridge_io", 65536);
|
||||
pci_bridge_region_init(br);
|
||||
br->windows = pci_bridge_region_init(br);
|
||||
QLIST_INIT(&sec_bus->child);
|
||||
QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling);
|
||||
return 0;
|
||||
|
@ -338,7 +343,8 @@ void pci_bridge_exitfn(PCIDevice *pci_dev)
|
|||
PCIBridge *s = DO_UPCAST(PCIBridge, dev, pci_dev);
|
||||
assert(QLIST_EMPTY(&s->sec_bus.child));
|
||||
QLIST_REMOVE(&s->sec_bus, sibling);
|
||||
pci_bridge_region_cleanup(s);
|
||||
pci_bridge_region_del(s, s->windows);
|
||||
pci_bridge_region_cleanup(s, s->windows);
|
||||
memory_region_destroy(&s->address_space_mem);
|
||||
memory_region_destroy(&s->address_space_io);
|
||||
/* qbus_free() is called automatically by qdev_free() */
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#define PCI_CLASS_SYSTEM_OTHER 0x0880
|
||||
|
||||
#define PCI_CLASS_SERIAL_USB 0x0c03
|
||||
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
|
||||
|
||||
#define PCI_CLASS_BRIDGE_HOST 0x0600
|
||||
#define PCI_CLASS_BRIDGE_ISA 0x0601
|
||||
|
@ -105,6 +106,7 @@
|
|||
#define PCI_DEVICE_ID_INTEL_82378 0x0484
|
||||
#define PCI_DEVICE_ID_INTEL_82441 0x1237
|
||||
#define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415
|
||||
#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e
|
||||
#define PCI_DEVICE_ID_INTEL_82801D 0x24CD
|
||||
#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
|
||||
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
|
||||
|
|
|
@ -40,6 +40,19 @@ struct PCIBus {
|
|||
int *irq_count;
|
||||
};
|
||||
|
||||
typedef struct PCIBridgeWindows PCIBridgeWindows;
|
||||
|
||||
/*
|
||||
* Aliases for each of the address space windows that the bridge
|
||||
* can forward. Mapped into the bridge's parent's address space,
|
||||
* as subregions.
|
||||
*/
|
||||
struct PCIBridgeWindows {
|
||||
MemoryRegion alias_pref_mem;
|
||||
MemoryRegion alias_mem;
|
||||
MemoryRegion alias_io;
|
||||
};
|
||||
|
||||
struct PCIBridge {
|
||||
PCIDevice dev;
|
||||
|
||||
|
@ -55,14 +68,9 @@ struct PCIBridge {
|
|||
*/
|
||||
MemoryRegion address_space_mem;
|
||||
MemoryRegion address_space_io;
|
||||
/*
|
||||
* Aliases for each of the address space windows that the bridge
|
||||
* can forward. Mapped into the bridge's parent's address space,
|
||||
* as subregions.
|
||||
*/
|
||||
MemoryRegion alias_pref_mem;
|
||||
MemoryRegion alias_mem;
|
||||
MemoryRegion alias_io;
|
||||
|
||||
PCIBridgeWindows *windows;
|
||||
|
||||
pci_map_irq_fn map_irq;
|
||||
const char *bus_name;
|
||||
};
|
||||
|
|
|
@ -107,14 +107,9 @@ static const MemoryRegionOps pcie_mmcfg_ops = {
|
|||
/* pcie_host::base_addr == PCIE_BASE_ADDR_UNMAPPED when it isn't mapped. */
|
||||
#define PCIE_BASE_ADDR_UNMAPPED ((hwaddr)-1ULL)
|
||||
|
||||
int pcie_host_init(PCIExpressHost *e, uint32_t size)
|
||||
int pcie_host_init(PCIExpressHost *e)
|
||||
{
|
||||
assert(!(size & (size - 1))); /* power of 2 */
|
||||
assert(size >= PCIE_MMCFG_SIZE_MIN);
|
||||
assert(size <= PCIE_MMCFG_SIZE_MAX);
|
||||
e->base_addr = PCIE_BASE_ADDR_UNMAPPED;
|
||||
e->size = size;
|
||||
memory_region_init_io(&e->mmio, &pcie_mmcfg_ops, e, "pcie-mmcfg", e->size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -123,22 +118,44 @@ void pcie_host_mmcfg_unmap(PCIExpressHost *e)
|
|||
{
|
||||
if (e->base_addr != PCIE_BASE_ADDR_UNMAPPED) {
|
||||
memory_region_del_subregion(get_system_memory(), &e->mmio);
|
||||
memory_region_destroy(&e->mmio);
|
||||
e->base_addr = PCIE_BASE_ADDR_UNMAPPED;
|
||||
}
|
||||
}
|
||||
|
||||
void pcie_host_mmcfg_map(PCIExpressHost *e, hwaddr addr)
|
||||
void pcie_host_mmcfg_map(PCIExpressHost *e, hwaddr addr,
|
||||
uint32_t size)
|
||||
{
|
||||
assert(!(size & (size - 1))); /* power of 2 */
|
||||
assert(size >= PCIE_MMCFG_SIZE_MIN);
|
||||
assert(size <= PCIE_MMCFG_SIZE_MAX);
|
||||
e->size = size;
|
||||
memory_region_init_io(&e->mmio, &pcie_mmcfg_ops, e, "pcie-mmcfg", e->size);
|
||||
e->base_addr = addr;
|
||||
memory_region_add_subregion(get_system_memory(), e->base_addr, &e->mmio);
|
||||
}
|
||||
|
||||
void pcie_host_mmcfg_update(PCIExpressHost *e,
|
||||
int enable,
|
||||
hwaddr addr)
|
||||
hwaddr addr,
|
||||
uint32_t size)
|
||||
{
|
||||
pcie_host_mmcfg_unmap(e);
|
||||
if (enable) {
|
||||
pcie_host_mmcfg_map(e, addr);
|
||||
pcie_host_mmcfg_map(e, addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
static const TypeInfo pcie_host_type_info = {
|
||||
.name = TYPE_PCIE_HOST_BRIDGE,
|
||||
.parent = TYPE_PCI_HOST_BRIDGE,
|
||||
.abstract = true,
|
||||
.instance_size = sizeof(PCIExpressHost),
|
||||
};
|
||||
|
||||
static void pcie_host_register_types(void)
|
||||
{
|
||||
type_register_static(&pcie_host_type_info);
|
||||
}
|
||||
|
||||
type_init(pcie_host_register_types)
|
||||
|
|
|
@ -24,6 +24,10 @@
|
|||
#include "pci_host.h"
|
||||
#include "memory.h"
|
||||
|
||||
#define TYPE_PCIE_HOST_BRIDGE "pcie-host-bridge"
|
||||
#define PCIE_HOST_BRIDGE(obj) \
|
||||
OBJECT_CHECK(PCIExpressHost, (obj), TYPE_PCIE_HOST_BRIDGE)
|
||||
|
||||
struct PCIExpressHost {
|
||||
PCIHostState pci;
|
||||
|
||||
|
@ -39,11 +43,12 @@ struct PCIExpressHost {
|
|||
MemoryRegion mmio;
|
||||
};
|
||||
|
||||
int pcie_host_init(PCIExpressHost *e, uint32_t size);
|
||||
int pcie_host_init(PCIExpressHost *e);
|
||||
void pcie_host_mmcfg_unmap(PCIExpressHost *e);
|
||||
void pcie_host_mmcfg_map(PCIExpressHost *e, hwaddr addr);
|
||||
void pcie_host_mmcfg_map(PCIExpressHost *e, hwaddr addr, uint32_t size);
|
||||
void pcie_host_mmcfg_update(PCIExpressHost *e,
|
||||
int enable,
|
||||
hwaddr addr);
|
||||
hwaddr addr,
|
||||
uint32_t size);
|
||||
|
||||
#endif /* PCIE_HOST_H */
|
||||
|
|
48
hw/pckbd.c
48
hw/pckbd.c
|
@ -194,7 +194,8 @@ static void kbd_update_aux_irq(void *opaque, int level)
|
|||
kbd_update_irq(s);
|
||||
}
|
||||
|
||||
static uint32_t kbd_read_status(void *opaque, uint32_t addr)
|
||||
static uint64_t kbd_read_status(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
{
|
||||
KBDState *s = opaque;
|
||||
int val;
|
||||
|
@ -223,7 +224,8 @@ static void outport_write(KBDState *s, uint32_t val)
|
|||
}
|
||||
}
|
||||
|
||||
static void kbd_write_command(void *opaque, uint32_t addr, uint32_t val)
|
||||
static void kbd_write_command(void *opaque, hwaddr addr,
|
||||
uint64_t val, unsigned size)
|
||||
{
|
||||
KBDState *s = opaque;
|
||||
|
||||
|
@ -303,12 +305,13 @@ static void kbd_write_command(void *opaque, uint32_t addr, uint32_t val)
|
|||
/* ignore that */
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "qemu: unsupported keyboard cmd=0x%02x\n", val);
|
||||
fprintf(stderr, "qemu: unsupported keyboard cmd=0x%02x\n", (int)val);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t kbd_read_data(void *opaque, uint32_t addr)
|
||||
static uint64_t kbd_read_data(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
{
|
||||
KBDState *s = opaque;
|
||||
uint32_t val;
|
||||
|
@ -322,7 +325,8 @@ static uint32_t kbd_read_data(void *opaque, uint32_t addr)
|
|||
return val;
|
||||
}
|
||||
|
||||
static void kbd_write_data(void *opaque, uint32_t addr, uint32_t val)
|
||||
static void kbd_write_data(void *opaque, hwaddr addr,
|
||||
uint64_t val, unsigned size)
|
||||
{
|
||||
KBDState *s = opaque;
|
||||
|
||||
|
@ -385,9 +389,9 @@ static uint32_t kbd_mm_readb (void *opaque, hwaddr addr)
|
|||
KBDState *s = opaque;
|
||||
|
||||
if (addr & s->mask)
|
||||
return kbd_read_status(s, 0) & 0xff;
|
||||
return kbd_read_status(s, 0, 1) & 0xff;
|
||||
else
|
||||
return kbd_read_data(s, 0) & 0xff;
|
||||
return kbd_read_data(s, 0, 1) & 0xff;
|
||||
}
|
||||
|
||||
static void kbd_mm_writeb (void *opaque, hwaddr addr, uint32_t value)
|
||||
|
@ -395,9 +399,9 @@ static void kbd_mm_writeb (void *opaque, hwaddr addr, uint32_t value)
|
|||
KBDState *s = opaque;
|
||||
|
||||
if (addr & s->mask)
|
||||
kbd_write_command(s, 0, value & 0xff);
|
||||
kbd_write_command(s, 0, value & 0xff, 1);
|
||||
else
|
||||
kbd_write_data(s, 0, value & 0xff);
|
||||
kbd_write_data(s, 0, value & 0xff, 1);
|
||||
}
|
||||
|
||||
static const MemoryRegionOps i8042_mmio_ops = {
|
||||
|
@ -459,22 +463,24 @@ static const VMStateDescription vmstate_kbd_isa = {
|
|||
}
|
||||
};
|
||||
|
||||
static const MemoryRegionPortio i8042_data_portio[] = {
|
||||
{ 0, 1, 1, .read = kbd_read_data, .write = kbd_write_data },
|
||||
PORTIO_END_OF_LIST()
|
||||
};
|
||||
|
||||
static const MemoryRegionPortio i8042_cmd_portio[] = {
|
||||
{ 0, 1, 1, .read = kbd_read_status, .write = kbd_write_command },
|
||||
PORTIO_END_OF_LIST()
|
||||
};
|
||||
|
||||
static const MemoryRegionOps i8042_data_ops = {
|
||||
.old_portio = i8042_data_portio
|
||||
.read = kbd_read_data,
|
||||
.write = kbd_write_data,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 1,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
static const MemoryRegionOps i8042_cmd_ops = {
|
||||
.old_portio = i8042_cmd_portio
|
||||
.read = kbd_read_status,
|
||||
.write = kbd_write_command,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 1,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
static int i8042_initfn(ISADevice *dev)
|
||||
|
|
|
@ -11,6 +11,7 @@ obj-y += ppc_newworld.o
|
|||
obj-$(CONFIG_PSERIES) += spapr.o spapr_hcall.o spapr_rtas.o spapr_vio.o
|
||||
obj-$(CONFIG_PSERIES) += xics.o spapr_vty.o spapr_llan.o spapr_vscsi.o
|
||||
obj-$(CONFIG_PSERIES) += spapr_pci.o pci-hotplug.o spapr_iommu.o
|
||||
obj-$(CONFIG_PSERIES) += spapr_events.o
|
||||
# PowerPC 4xx boards
|
||||
obj-y += ppc4xx_devs.o ppc4xx_pci.o ppc405_uc.o ppc405_boards.o
|
||||
obj-y += ppc440_bamboo.o
|
||||
|
|
|
@ -52,7 +52,6 @@
|
|||
#define MPC8544_PCI_REGS_BASE (MPC8544_CCSRBAR_BASE + 0x8000ULL)
|
||||
#define MPC8544_PCI_REGS_SIZE 0x1000ULL
|
||||
#define MPC8544_PCI_IO 0xE1000000ULL
|
||||
#define MPC8544_PCI_IOLEN 0x10000ULL
|
||||
#define MPC8544_UTIL_BASE (MPC8544_CCSRBAR_BASE + 0xe0000ULL)
|
||||
#define MPC8544_SPIN_BASE 0xEF000000ULL
|
||||
|
||||
|
@ -496,7 +495,7 @@ void ppce500_init(PPCE500Params *params)
|
|||
if (serial_hds[1]) {
|
||||
serial_mm_init(address_space_mem, MPC8544_SERIAL1_REGS_BASE,
|
||||
0, mpic[12+26], 399193,
|
||||
serial_hds[0], DEVICE_BIG_ENDIAN);
|
||||
serial_hds[1], DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
/* General Utility device */
|
||||
|
@ -511,7 +510,7 @@ void ppce500_init(PPCE500Params *params)
|
|||
if (!pci_bus)
|
||||
printf("couldn't create PCI controller!\n");
|
||||
|
||||
isa_mmio_init(MPC8544_PCI_IO, MPC8544_PCI_IOLEN);
|
||||
sysbus_mmio_map(sysbus_from_qdev(dev), 1, MPC8544_PCI_IO);
|
||||
|
||||
if (pci_bus) {
|
||||
/* Register network interfaces. */
|
||||
|
|
|
@ -59,7 +59,7 @@ static int bamboo_load_device_tree(hwaddr addr,
|
|||
{
|
||||
int ret = -1;
|
||||
#ifdef CONFIG_FDT
|
||||
uint32_t mem_reg_property[] = { 0, 0, ramsize };
|
||||
uint32_t mem_reg_property[] = { 0, 0, cpu_to_be32(ramsize) };
|
||||
char *filename;
|
||||
int fdt_size;
|
||||
void *fdt;
|
||||
|
|
|
@ -348,10 +348,6 @@ static void ppc_core99_init(QEMUMachineInitArgs *args)
|
|||
ide_mem[1] = pmac_ide_init(hd, pic[0x0d], dbdma, 0x16, pic[0x02]);
|
||||
ide_mem[2] = pmac_ide_init(&hd[MAX_IDE_DEVS], pic[0x0e], dbdma, 0x1a, pic[0x02]);
|
||||
|
||||
/* cuda also initialize ADB */
|
||||
if (machine_arch == ARCH_MAC99_U3) {
|
||||
usb_enabled = 1;
|
||||
}
|
||||
cuda_init(&cuda_mem, pic[0x19]);
|
||||
|
||||
adb_kbd_init(&adb_bus);
|
||||
|
@ -360,15 +356,14 @@ static void ppc_core99_init(QEMUMachineInitArgs *args)
|
|||
macio_init(pci_bus, PCI_DEVICE_ID_APPLE_UNI_N_KEYL, 0, pic_mem,
|
||||
dbdma_mem, cuda_mem, NULL, 3, ide_mem, escc_bar);
|
||||
|
||||
if (usb_enabled) {
|
||||
if (usb_enabled(machine_arch == ARCH_MAC99_U3)) {
|
||||
pci_create_simple(pci_bus, -1, "pci-ohci");
|
||||
}
|
||||
|
||||
/* U3 needs to use USB for input because Linux doesn't support via-cuda
|
||||
on PPC64 */
|
||||
if (machine_arch == ARCH_MAC99_U3) {
|
||||
usbdevice_create("keyboard");
|
||||
usbdevice_create("mouse");
|
||||
/* U3 needs to use USB for input because Linux doesn't support via-cuda
|
||||
on PPC64 */
|
||||
if (machine_arch == ARCH_MAC99_U3) {
|
||||
usbdevice_create("keyboard");
|
||||
usbdevice_create("mouse");
|
||||
}
|
||||
}
|
||||
|
||||
if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8)
|
||||
|
|
|
@ -286,7 +286,7 @@ static void ppc_heathrow_init(QEMUMachineInitArgs *args)
|
|||
macio_init(pci_bus, PCI_DEVICE_ID_APPLE_343S1201, 1, pic_mem,
|
||||
dbdma_mem, cuda_mem, nvr, 2, ide_mem, escc_bar);
|
||||
|
||||
if (usb_enabled) {
|
||||
if (usb_enabled(false)) {
|
||||
pci_create_simple(pci_bus, -1, "pci-ohci");
|
||||
}
|
||||
|
||||
|
|
|
@ -661,7 +661,7 @@ static void ppc_prep_init(QEMUMachineInitArgs *args)
|
|||
memory_region_add_subregion(sysmem, 0xFEFF0000, xcsr);
|
||||
#endif
|
||||
|
||||
if (usb_enabled) {
|
||||
if (usb_enabled(false)) {
|
||||
pci_create_simple(pci_bus, -1, "pci-ohci");
|
||||
}
|
||||
|
||||
|
|
|
@ -31,6 +31,8 @@
|
|||
#define PCIE500_ALL_SIZE 0x1000
|
||||
#define PCIE500_REG_SIZE (PCIE500_ALL_SIZE - PCIE500_REG_BASE)
|
||||
|
||||
#define PCIE500_PCI_IOLEN 0x10000ULL
|
||||
|
||||
#define PPCE500_PCI_CONFIG_ADDR 0x0
|
||||
#define PPCE500_PCI_CONFIG_DATA 0x4
|
||||
#define PPCE500_PCI_INTACK 0x8
|
||||
|
@ -87,6 +89,7 @@ struct PPCE500PCIState {
|
|||
/* mmio maps */
|
||||
MemoryRegion container;
|
||||
MemoryRegion iomem;
|
||||
MemoryRegion pio;
|
||||
};
|
||||
|
||||
typedef struct PPCE500PCIState PPCE500PCIState;
|
||||
|
@ -314,7 +317,6 @@ static int e500_pcihost_initfn(SysBusDevice *dev)
|
|||
PCIBus *b;
|
||||
int i;
|
||||
MemoryRegion *address_space_mem = get_system_memory();
|
||||
MemoryRegion *address_space_io = get_system_io();
|
||||
|
||||
h = PCI_HOST_BRIDGE(dev);
|
||||
s = PPC_E500_PCI_HOST_BRIDGE(dev);
|
||||
|
@ -323,9 +325,11 @@ static int e500_pcihost_initfn(SysBusDevice *dev)
|
|||
sysbus_init_irq(dev, &s->irq[i]);
|
||||
}
|
||||
|
||||
memory_region_init(&s->pio, "pci-pio", PCIE500_PCI_IOLEN);
|
||||
|
||||
b = pci_register_bus(DEVICE(dev), NULL, mpc85xx_pci_set_irq,
|
||||
mpc85xx_pci_map_irq, s->irq, address_space_mem,
|
||||
address_space_io, PCI_DEVFN(0x11, 0), 4);
|
||||
&s->pio, PCI_DEVFN(0x11, 0), 4);
|
||||
h->bus = b;
|
||||
|
||||
pci_create_simple(b, 0, "e500-host-bridge");
|
||||
|
@ -341,6 +345,7 @@ static int e500_pcihost_initfn(SysBusDevice *dev)
|
|||
memory_region_add_subregion(&s->container, PCIE500_CFGDATA, &h->data_mem);
|
||||
memory_region_add_subregion(&s->container, PCIE500_REG_BASE, &s->iomem);
|
||||
sysbus_init_mmio(dev, &s->container);
|
||||
sysbus_init_mmio(dev, &s->pio);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -2108,7 +2108,7 @@ PXA2xxState *pxa270_init(MemoryRegion *address_space,
|
|||
s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi");
|
||||
}
|
||||
|
||||
if (usb_enabled) {
|
||||
if (usb_enabled(false)) {
|
||||
sysbus_create_simple("sysbus-ohci", 0x4c000000,
|
||||
qdev_get_gpio_in(s->pic, PXA2XX_PIC_USBH1));
|
||||
}
|
||||
|
@ -2239,7 +2239,7 @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size)
|
|||
s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi");
|
||||
}
|
||||
|
||||
if (usb_enabled) {
|
||||
if (usb_enabled(false)) {
|
||||
sysbus_create_simple("sysbus-ohci", 0x4c000000,
|
||||
qdev_get_gpio_in(s->pic, PXA2XX_PIC_USBH1));
|
||||
}
|
||||
|
|
|
@ -227,7 +227,7 @@ static void realview_init(ram_addr_t ram_size,
|
|||
sysbus_connect_irq(busdev, 2, pic[50]);
|
||||
sysbus_connect_irq(busdev, 3, pic[51]);
|
||||
pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci");
|
||||
if (usb_enabled) {
|
||||
if (usb_enabled(false)) {
|
||||
pci_create_simple(pci_bus, -1, "pci-ohci");
|
||||
}
|
||||
n = drive_get_max_bus(IF_SCSI);
|
||||
|
|
78
hw/rtl8139.c
78
hw/rtl8139.c
|
@ -3187,38 +3187,6 @@ static uint32_t rtl8139_io_readl(void *opaque, uint8_t addr)
|
|||
|
||||
/* */
|
||||
|
||||
static void rtl8139_ioport_writeb(void *opaque, uint32_t addr, uint32_t val)
|
||||
{
|
||||
rtl8139_io_writeb(opaque, addr & 0xFF, val);
|
||||
}
|
||||
|
||||
static void rtl8139_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
|
||||
{
|
||||
rtl8139_io_writew(opaque, addr & 0xFF, val);
|
||||
}
|
||||
|
||||
static void rtl8139_ioport_writel(void *opaque, uint32_t addr, uint32_t val)
|
||||
{
|
||||
rtl8139_io_writel(opaque, addr & 0xFF, val);
|
||||
}
|
||||
|
||||
static uint32_t rtl8139_ioport_readb(void *opaque, uint32_t addr)
|
||||
{
|
||||
return rtl8139_io_readb(opaque, addr & 0xFF);
|
||||
}
|
||||
|
||||
static uint32_t rtl8139_ioport_readw(void *opaque, uint32_t addr)
|
||||
{
|
||||
return rtl8139_io_readw(opaque, addr & 0xFF);
|
||||
}
|
||||
|
||||
static uint32_t rtl8139_ioport_readl(void *opaque, uint32_t addr)
|
||||
{
|
||||
return rtl8139_io_readl(opaque, addr & 0xFF);
|
||||
}
|
||||
|
||||
/* */
|
||||
|
||||
static void rtl8139_mmio_writeb(void *opaque, hwaddr addr, uint32_t val)
|
||||
{
|
||||
rtl8139_io_writeb(opaque, addr & 0xFF, val);
|
||||
|
@ -3386,18 +3354,44 @@ static const VMStateDescription vmstate_rtl8139 = {
|
|||
/***********************************************************/
|
||||
/* PCI RTL8139 definitions */
|
||||
|
||||
static const MemoryRegionPortio rtl8139_portio[] = {
|
||||
{ 0, 0x100, 1, .read = rtl8139_ioport_readb, },
|
||||
{ 0, 0x100, 1, .write = rtl8139_ioport_writeb, },
|
||||
{ 0, 0x100, 2, .read = rtl8139_ioport_readw, },
|
||||
{ 0, 0x100, 2, .write = rtl8139_ioport_writew, },
|
||||
{ 0, 0x100, 4, .read = rtl8139_ioport_readl, },
|
||||
{ 0, 0x100, 4, .write = rtl8139_ioport_writel, },
|
||||
PORTIO_END_OF_LIST()
|
||||
};
|
||||
static void rtl8139_ioport_write(void *opaque, hwaddr addr,
|
||||
uint64_t val, unsigned size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
rtl8139_io_writeb(opaque, addr, val);
|
||||
break;
|
||||
case 2:
|
||||
rtl8139_io_writew(opaque, addr, val);
|
||||
break;
|
||||
case 4:
|
||||
rtl8139_io_writel(opaque, addr, val);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static uint64_t rtl8139_ioport_read(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
return rtl8139_io_readb(opaque, addr);
|
||||
case 2:
|
||||
return rtl8139_io_readw(opaque, addr);
|
||||
case 4:
|
||||
return rtl8139_io_readl(opaque, addr);
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static const MemoryRegionOps rtl8139_io_ops = {
|
||||
.old_portio = rtl8139_portio,
|
||||
.read = rtl8139_ioport_read,
|
||||
.write = rtl8139_ioport_write,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 4,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
#include "exec-memory.h"
|
||||
|
||||
#include "hw/s390-virtio-bus.h"
|
||||
#include "hw/s390x/sclp.h"
|
||||
|
||||
//#define DEBUG_S390
|
||||
|
||||
|
@ -184,6 +185,7 @@ static void s390_init(QEMUMachineInitArgs *args)
|
|||
|
||||
/* get a BUS */
|
||||
s390_bus = s390_virtio_bus_init(&my_ram_size);
|
||||
s390_sclp_init();
|
||||
|
||||
/* allocate RAM */
|
||||
memory_region_init_ram(ram, "s390.ram", my_ram_size);
|
||||
|
@ -285,8 +287,8 @@ static void s390_init(QEMUMachineInitArgs *args)
|
|||
}
|
||||
|
||||
/* we have to overwrite values in the kernel image, which are "rom" */
|
||||
memcpy(rom_ptr(INITRD_PARM_START), &initrd_offset, 8);
|
||||
memcpy(rom_ptr(INITRD_PARM_SIZE), &initrd_size, 8);
|
||||
stq_p(rom_ptr(INITRD_PARM_START), initrd_offset);
|
||||
stq_p(rom_ptr(INITRD_PARM_SIZE), initrd_size);
|
||||
}
|
||||
|
||||
if (rom_ptr(KERN_PARM_AREA)) {
|
||||
|
|
|
@ -1,3 +1,6 @@
|
|||
obj-y = s390-virtio-bus.o s390-virtio.o
|
||||
|
||||
obj-y := $(addprefix ../,$(obj-y))
|
||||
obj-y += sclp.o
|
||||
obj-y += event-facility.o
|
||||
obj-y += sclpquiesce.o sclpconsole.o
|
||||
|
|
398
hw/s390x/event-facility.c
Normal file
398
hw/s390x/event-facility.c
Normal file
|
@ -0,0 +1,398 @@
|
|||
/*
|
||||
* SCLP
|
||||
* Event Facility
|
||||
* handles SCLP event types
|
||||
* - Signal Quiesce - system power down
|
||||
* - ASCII Console Data - VT220 read and write
|
||||
*
|
||||
* Copyright IBM, Corp. 2012
|
||||
*
|
||||
* Authors:
|
||||
* Heinz Graalfs <graalfs@de.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or (at your
|
||||
* option) any later version. See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "monitor.h"
|
||||
#include "sysemu.h"
|
||||
|
||||
#include "sclp.h"
|
||||
#include "event-facility.h"
|
||||
|
||||
typedef struct EventTypesBus {
|
||||
BusState qbus;
|
||||
} EventTypesBus;
|
||||
|
||||
struct SCLPEventFacility {
|
||||
EventTypesBus sbus;
|
||||
DeviceState *qdev;
|
||||
/* guest' receive mask */
|
||||
unsigned int receive_mask;
|
||||
};
|
||||
|
||||
/* return true if any child has event pending set */
|
||||
static bool event_pending(SCLPEventFacility *ef)
|
||||
{
|
||||
BusChild *kid;
|
||||
SCLPEvent *event;
|
||||
SCLPEventClass *event_class;
|
||||
|
||||
QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
|
||||
DeviceState *qdev = kid->child;
|
||||
event = DO_UPCAST(SCLPEvent, qdev, qdev);
|
||||
event_class = SCLP_EVENT_GET_CLASS(event);
|
||||
if (event->event_pending &&
|
||||
event_class->get_send_mask() & ef->receive_mask) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static unsigned int get_host_send_mask(SCLPEventFacility *ef)
|
||||
{
|
||||
unsigned int mask;
|
||||
BusChild *kid;
|
||||
SCLPEventClass *child;
|
||||
|
||||
mask = 0;
|
||||
|
||||
QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
|
||||
DeviceState *qdev = kid->child;
|
||||
child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
|
||||
mask |= child->get_send_mask();
|
||||
}
|
||||
return mask;
|
||||
}
|
||||
|
||||
static unsigned int get_host_receive_mask(SCLPEventFacility *ef)
|
||||
{
|
||||
unsigned int mask;
|
||||
BusChild *kid;
|
||||
SCLPEventClass *child;
|
||||
|
||||
mask = 0;
|
||||
|
||||
QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
|
||||
DeviceState *qdev = kid->child;
|
||||
child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
|
||||
mask |= child->get_receive_mask();
|
||||
}
|
||||
return mask;
|
||||
}
|
||||
|
||||
static uint16_t write_event_length_check(SCCB *sccb)
|
||||
{
|
||||
int slen;
|
||||
unsigned elen = 0;
|
||||
EventBufferHeader *event;
|
||||
WriteEventData *wed = (WriteEventData *) sccb;
|
||||
|
||||
event = (EventBufferHeader *) &wed->ebh;
|
||||
for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
|
||||
elen = be16_to_cpu(event->length);
|
||||
if (elen < sizeof(*event) || elen > slen) {
|
||||
return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR;
|
||||
}
|
||||
event = (void *) event + elen;
|
||||
}
|
||||
if (slen) {
|
||||
return SCLP_RC_INCONSISTENT_LENGTHS;
|
||||
}
|
||||
return SCLP_RC_NORMAL_COMPLETION;
|
||||
}
|
||||
|
||||
static uint16_t handle_write_event_buf(SCLPEventFacility *ef,
|
||||
EventBufferHeader *event_buf, SCCB *sccb)
|
||||
{
|
||||
uint16_t rc;
|
||||
BusChild *kid;
|
||||
SCLPEvent *event;
|
||||
SCLPEventClass *ec;
|
||||
|
||||
QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
|
||||
DeviceState *qdev = kid->child;
|
||||
event = (SCLPEvent *) qdev;
|
||||
ec = SCLP_EVENT_GET_CLASS(event);
|
||||
|
||||
rc = SCLP_RC_INVALID_FUNCTION;
|
||||
if (ec->write_event_data &&
|
||||
ec->event_type() == event_buf->type) {
|
||||
rc = ec->write_event_data(event, event_buf);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb)
|
||||
{
|
||||
uint16_t rc;
|
||||
int slen;
|
||||
unsigned elen = 0;
|
||||
EventBufferHeader *event_buf;
|
||||
WriteEventData *wed = (WriteEventData *) sccb;
|
||||
|
||||
event_buf = &wed->ebh;
|
||||
rc = SCLP_RC_NORMAL_COMPLETION;
|
||||
|
||||
/* loop over all contained event buffers */
|
||||
for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
|
||||
elen = be16_to_cpu(event_buf->length);
|
||||
|
||||
/* in case of a previous error mark all trailing buffers
|
||||
* as not accepted */
|
||||
if (rc != SCLP_RC_NORMAL_COMPLETION) {
|
||||
event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED);
|
||||
} else {
|
||||
rc = handle_write_event_buf(ef, event_buf, sccb);
|
||||
}
|
||||
event_buf = (void *) event_buf + elen;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
|
||||
{
|
||||
if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) {
|
||||
sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
|
||||
goto out;
|
||||
}
|
||||
if (be16_to_cpu(sccb->h.length) < 8) {
|
||||
sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
|
||||
goto out;
|
||||
}
|
||||
/* first do a sanity check of the write events */
|
||||
sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb));
|
||||
|
||||
/* if no early error, then execute */
|
||||
if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) {
|
||||
sccb->h.response_code =
|
||||
cpu_to_be16(handle_sccb_write_events(ef, sccb));
|
||||
}
|
||||
|
||||
out:
|
||||
return;
|
||||
}
|
||||
|
||||
static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb,
|
||||
unsigned int mask)
|
||||
{
|
||||
uint16_t rc;
|
||||
int slen;
|
||||
unsigned elen = 0;
|
||||
BusChild *kid;
|
||||
SCLPEvent *event;
|
||||
SCLPEventClass *ec;
|
||||
EventBufferHeader *event_buf;
|
||||
ReadEventData *red = (ReadEventData *) sccb;
|
||||
|
||||
event_buf = &red->ebh;
|
||||
event_buf->length = 0;
|
||||
slen = sizeof(sccb->data);
|
||||
|
||||
rc = SCLP_RC_NO_EVENT_BUFFERS_STORED;
|
||||
|
||||
QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
|
||||
DeviceState *qdev = kid->child;
|
||||
event = (SCLPEvent *) qdev;
|
||||
ec = SCLP_EVENT_GET_CLASS(event);
|
||||
|
||||
if (mask & ec->get_send_mask()) {
|
||||
if (ec->read_event_data(event, event_buf, &slen)) {
|
||||
rc = SCLP_RC_NORMAL_COMPLETION;
|
||||
}
|
||||
}
|
||||
elen = be16_to_cpu(event_buf->length);
|
||||
event_buf = (void *) event_buf + elen;
|
||||
}
|
||||
|
||||
if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) {
|
||||
/* architecture suggests to reset variable-length-response bit */
|
||||
sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE;
|
||||
/* with a new length value */
|
||||
sccb->h.length = cpu_to_be16(SCCB_SIZE - slen);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
|
||||
{
|
||||
unsigned int sclp_active_selection_mask;
|
||||
unsigned int sclp_cp_receive_mask;
|
||||
|
||||
ReadEventData *red = (ReadEventData *) sccb;
|
||||
|
||||
if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) {
|
||||
sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
|
||||
goto out;
|
||||
}
|
||||
|
||||
sclp_cp_receive_mask = ef->receive_mask;
|
||||
|
||||
/* get active selection mask */
|
||||
switch (sccb->h.function_code) {
|
||||
case SCLP_UNCONDITIONAL_READ:
|
||||
sclp_active_selection_mask = sclp_cp_receive_mask;
|
||||
break;
|
||||
case SCLP_SELECTIVE_READ:
|
||||
if (!(sclp_cp_receive_mask & be32_to_cpu(red->mask))) {
|
||||
sccb->h.response_code =
|
||||
cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK);
|
||||
goto out;
|
||||
}
|
||||
sclp_active_selection_mask = be32_to_cpu(red->mask);
|
||||
break;
|
||||
default:
|
||||
sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
|
||||
goto out;
|
||||
}
|
||||
sccb->h.response_code = cpu_to_be16(
|
||||
handle_sccb_read_events(ef, sccb, sclp_active_selection_mask));
|
||||
|
||||
out:
|
||||
return;
|
||||
}
|
||||
|
||||
static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
|
||||
{
|
||||
WriteEventMask *we_mask = (WriteEventMask *) sccb;
|
||||
|
||||
/* Attention: We assume that Linux uses 4-byte masks, what it actually
|
||||
does. Architecture allows for masks of variable size, though */
|
||||
if (be16_to_cpu(we_mask->mask_length) != 4) {
|
||||
sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* keep track of the guest's capability masks */
|
||||
ef->receive_mask = be32_to_cpu(we_mask->cp_receive_mask);
|
||||
|
||||
/* return the SCLP's capability masks to the guest */
|
||||
we_mask->send_mask = cpu_to_be32(get_host_send_mask(ef));
|
||||
we_mask->receive_mask = cpu_to_be32(get_host_receive_mask(ef));
|
||||
|
||||
sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
|
||||
|
||||
out:
|
||||
return;
|
||||
}
|
||||
|
||||
/* qemu object creation and initialization functions */
|
||||
|
||||
#define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus"
|
||||
|
||||
static void sclp_events_bus_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
}
|
||||
|
||||
static const TypeInfo s390_sclp_events_bus_info = {
|
||||
.name = TYPE_SCLP_EVENTS_BUS,
|
||||
.parent = TYPE_BUS,
|
||||
.class_init = sclp_events_bus_class_init,
|
||||
};
|
||||
|
||||
static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
|
||||
{
|
||||
switch (code) {
|
||||
case SCLP_CMD_READ_EVENT_DATA:
|
||||
read_event_data(ef, sccb);
|
||||
break;
|
||||
case SCLP_CMD_WRITE_EVENT_DATA:
|
||||
write_event_data(ef, sccb);
|
||||
break;
|
||||
case SCLP_CMD_WRITE_EVENT_MASK:
|
||||
write_event_mask(ef, sccb);
|
||||
break;
|
||||
default:
|
||||
sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int init_event_facility(S390SCLPDevice *sdev)
|
||||
{
|
||||
SCLPEventFacility *event_facility;
|
||||
DeviceState *quiesce;
|
||||
|
||||
event_facility = g_malloc0(sizeof(SCLPEventFacility));
|
||||
sdev->ef = event_facility;
|
||||
sdev->sclp_command_handler = command_handler;
|
||||
sdev->event_pending = event_pending;
|
||||
|
||||
/* Spawn a new sclp-events facility */
|
||||
qbus_create_inplace(&event_facility->sbus.qbus,
|
||||
TYPE_SCLP_EVENTS_BUS, (DeviceState *)sdev, NULL);
|
||||
event_facility->sbus.qbus.allow_hotplug = 0;
|
||||
event_facility->qdev = (DeviceState *) sdev;
|
||||
|
||||
quiesce = qdev_create(&event_facility->sbus.qbus, "sclpquiesce");
|
||||
if (!quiesce) {
|
||||
return -1;
|
||||
}
|
||||
qdev_init_nofail(quiesce);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void init_event_facility_class(ObjectClass *klass, void *data)
|
||||
{
|
||||
S390SCLPDeviceClass *k = SCLP_S390_DEVICE_CLASS(klass);
|
||||
|
||||
k->init = init_event_facility;
|
||||
}
|
||||
|
||||
static TypeInfo s390_sclp_event_facility_info = {
|
||||
.name = "s390-sclp-event-facility",
|
||||
.parent = TYPE_DEVICE_S390_SCLP,
|
||||
.instance_size = sizeof(S390SCLPDevice),
|
||||
.class_init = init_event_facility_class,
|
||||
};
|
||||
|
||||
static int event_qdev_init(DeviceState *qdev)
|
||||
{
|
||||
SCLPEvent *event = DO_UPCAST(SCLPEvent, qdev, qdev);
|
||||
SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);
|
||||
|
||||
return child->init(event);
|
||||
}
|
||||
|
||||
static int event_qdev_exit(DeviceState *qdev)
|
||||
{
|
||||
SCLPEvent *event = DO_UPCAST(SCLPEvent, qdev, qdev);
|
||||
SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);
|
||||
if (child->exit) {
|
||||
child->exit(event);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void event_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
||||
dc->bus_type = TYPE_SCLP_EVENTS_BUS;
|
||||
dc->unplug = qdev_simple_unplug_cb;
|
||||
dc->init = event_qdev_init;
|
||||
dc->exit = event_qdev_exit;
|
||||
}
|
||||
|
||||
static TypeInfo s390_sclp_event_type_info = {
|
||||
.name = TYPE_SCLP_EVENT,
|
||||
.parent = TYPE_DEVICE,
|
||||
.instance_size = sizeof(SCLPEvent),
|
||||
.class_init = event_class_init,
|
||||
.class_size = sizeof(SCLPEventClass),
|
||||
.abstract = true,
|
||||
};
|
||||
|
||||
static void register_types(void)
|
||||
{
|
||||
type_register_static(&s390_sclp_events_bus_info);
|
||||
type_register_static(&s390_sclp_event_facility_info);
|
||||
type_register_static(&s390_sclp_event_type_info);
|
||||
}
|
||||
|
||||
type_init(register_types)
|
96
hw/s390x/event-facility.h
Normal file
|
@ -0,0 +1,96 @@
|
|||
/*
|
||||
* SCLP
|
||||
* Event Facility definitions
|
||||
*
|
||||
* Copyright IBM, Corp. 2012
|
||||
*
|
||||
* Authors:
|
||||
* Heinz Graalfs <graalfs@de.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or (at your
|
||||
* option) any later version. See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef HW_S390_SCLP_EVENT_FACILITY_H
|
||||
#define HW_S390_SCLP_EVENT_FACILITY_H
|
||||
|
||||
#include <hw/qdev.h>
|
||||
#include "qemu-thread.h"
|
||||
|
||||
/* SCLP event types */
|
||||
#define SCLP_EVENT_ASCII_CONSOLE_DATA 0x1a
|
||||
#define SCLP_EVENT_SIGNAL_QUIESCE 0x1d
|
||||
|
||||
/* SCLP event masks */
|
||||
#define SCLP_EVENT_MASK_SIGNAL_QUIESCE 0x00000008
|
||||
#define SCLP_EVENT_MASK_MSG_ASCII 0x00000040
|
||||
|
||||
#define SCLP_UNCONDITIONAL_READ 0x00
|
||||
#define SCLP_SELECTIVE_READ 0x01
|
||||
|
||||
#define TYPE_SCLP_EVENT "s390-sclp-event-type"
|
||||
#define SCLP_EVENT(obj) \
|
||||
OBJECT_CHECK(SCLPEvent, (obj), TYPE_SCLP_EVENT)
|
||||
#define SCLP_EVENT_CLASS(klass) \
|
||||
OBJECT_CLASS_CHECK(SCLPEventClass, (klass), TYPE_SCLP_EVENT)
|
||||
#define SCLP_EVENT_GET_CLASS(obj) \
|
||||
OBJECT_GET_CLASS(SCLPEventClass, (obj), TYPE_SCLP_EVENT)
|
||||
|
||||
typedef struct WriteEventMask {
|
||||
SCCBHeader h;
|
||||
uint16_t _reserved;
|
||||
uint16_t mask_length;
|
||||
uint32_t cp_receive_mask;
|
||||
uint32_t cp_send_mask;
|
||||
uint32_t send_mask;
|
||||
uint32_t receive_mask;
|
||||
} QEMU_PACKED WriteEventMask;
|
||||
|
||||
typedef struct EventBufferHeader {
|
||||
uint16_t length;
|
||||
uint8_t type;
|
||||
uint8_t flags;
|
||||
uint16_t _reserved;
|
||||
} QEMU_PACKED EventBufferHeader;
|
||||
|
||||
typedef struct WriteEventData {
|
||||
SCCBHeader h;
|
||||
EventBufferHeader ebh;
|
||||
} QEMU_PACKED WriteEventData;
|
||||
|
||||
typedef struct ReadEventData {
|
||||
SCCBHeader h;
|
||||
EventBufferHeader ebh;
|
||||
uint32_t mask;
|
||||
} QEMU_PACKED ReadEventData;
|
||||
|
||||
typedef struct SCLPEvent {
|
||||
DeviceState qdev;
|
||||
bool event_pending;
|
||||
uint32_t event_type;
|
||||
char *name;
|
||||
} SCLPEvent;
|
||||
|
||||
typedef struct SCLPEventClass {
|
||||
DeviceClass parent_class;
|
||||
int (*init)(SCLPEvent *event);
|
||||
int (*exit)(SCLPEvent *event);
|
||||
|
||||
/* get SCLP's send mask */
|
||||
unsigned int (*get_send_mask)(void);
|
||||
|
||||
/* get SCLP's receive mask */
|
||||
unsigned int (*get_receive_mask)(void);
|
||||
|
||||
int (*read_event_data)(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
|
||||
int *slen);
|
||||
|
||||
int (*write_event_data)(SCLPEvent *event, EventBufferHeader *evt_buf_hdr);
|
||||
|
||||
/* returns the supported event type */
|
||||
int (*event_type)(void);
|
||||
|
||||
} SCLPEventClass;
|
||||
|
||||
#endif
|
163
hw/s390x/sclp.c
Normal file
|
@ -0,0 +1,163 @@
|
|||
/*
|
||||
* SCLP Support
|
||||
*
|
||||
* Copyright IBM, Corp. 2012
|
||||
*
|
||||
* Authors:
|
||||
* Christian Borntraeger <borntraeger@de.ibm.com>
|
||||
* Heinz Graalfs <graalfs@linux.vnet.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or (at your
|
||||
* option) any later version. See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "cpu.h"
|
||||
#include "kvm.h"
|
||||
#include "memory.h"
|
||||
|
||||
#include "sclp.h"
|
||||
|
||||
static inline S390SCLPDevice *get_event_facility(void)
|
||||
{
|
||||
ObjectProperty *op = object_property_find(qdev_get_machine(),
|
||||
"s390-sclp-event-facility",
|
||||
NULL);
|
||||
assert(op);
|
||||
return op->opaque;
|
||||
}
|
||||
|
||||
/* Provide information about the configuration, CPUs and storage */
|
||||
static void read_SCP_info(SCCB *sccb)
|
||||
{
|
||||
ReadInfo *read_info = (ReadInfo *) sccb;
|
||||
int shift = 0;
|
||||
|
||||
while ((ram_size >> (20 + shift)) > 65535) {
|
||||
shift++;
|
||||
}
|
||||
read_info->rnmax = cpu_to_be16(ram_size >> (20 + shift));
|
||||
read_info->rnsize = 1 << shift;
|
||||
sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
|
||||
}
|
||||
|
||||
static void sclp_execute(SCCB *sccb, uint64_t code)
|
||||
{
|
||||
S390SCLPDevice *sdev = get_event_facility();
|
||||
|
||||
switch (code) {
|
||||
case SCLP_CMDW_READ_SCP_INFO:
|
||||
case SCLP_CMDW_READ_SCP_INFO_FORCED:
|
||||
read_SCP_info(sccb);
|
||||
break;
|
||||
default:
|
||||
sdev->sclp_command_handler(sdev->ef, sccb, code);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int sclp_service_call(uint32_t sccb, uint64_t code)
|
||||
{
|
||||
int r = 0;
|
||||
SCCB work_sccb;
|
||||
|
||||
hwaddr sccb_len = sizeof(SCCB);
|
||||
|
||||
/* first, some basic checks that can raise program checks */
|
||||
if (cpu_physical_memory_is_io(sccb)) {
|
||||
r = -PGM_ADDRESSING;
|
||||
goto out;
|
||||
}
|
||||
if (sccb & ~0x7ffffff8ul) {
|
||||
r = -PGM_SPECIFICATION;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* we want to work on a private copy of the sccb, to prevent guests
|
||||
* from playing dirty tricks by modifying the memory content after
|
||||
* the host has checked the values
|
||||
*/
|
||||
cpu_physical_memory_read(sccb, &work_sccb, sccb_len);
|
||||
|
||||
/* Valid sccb sizes */
|
||||
if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
|
||||
be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
|
||||
r = -PGM_SPECIFICATION;
|
||||
goto out;
|
||||
}
|
||||
|
||||
sclp_execute((SCCB *)&work_sccb, code);
|
||||
|
||||
cpu_physical_memory_write(sccb, &work_sccb,
|
||||
be16_to_cpu(work_sccb.h.length));
|
||||
|
||||
sclp_service_interrupt(sccb);
|
||||
|
||||
out:
|
||||
return r;
|
||||
}
|
||||
|
||||
void sclp_service_interrupt(uint32_t sccb)
|
||||
{
|
||||
S390SCLPDevice *sdev = get_event_facility();
|
||||
uint32_t param = sccb & ~3;
|
||||
|
||||
/* Indicate whether an event is still pending */
|
||||
param |= sdev->event_pending(sdev->ef) ? 1 : 0;
|
||||
|
||||
if (!param) {
|
||||
/* No need to send an interrupt, there's nothing to be notified about */
|
||||
return;
|
||||
}
|
||||
s390_sclp_extint(param);
|
||||
}
|
||||
|
||||
/* qemu object creation and initialization functions */
|
||||
|
||||
void s390_sclp_init(void)
|
||||
{
|
||||
DeviceState *dev = qdev_create(NULL, "s390-sclp-event-facility");
|
||||
|
||||
object_property_add_child(qdev_get_machine(), "s390-sclp-event-facility",
|
||||
OBJECT(dev), NULL);
|
||||
qdev_init_nofail(dev);
|
||||
}
|
||||
|
||||
static int s390_sclp_dev_init(SysBusDevice *dev)
|
||||
{
|
||||
int r;
|
||||
S390SCLPDevice *sdev = (S390SCLPDevice *)dev;
|
||||
S390SCLPDeviceClass *sclp = SCLP_S390_DEVICE_GET_CLASS(dev);
|
||||
|
||||
r = sclp->init(sdev);
|
||||
if (!r) {
|
||||
assert(sdev->event_pending);
|
||||
assert(sdev->sclp_command_handler);
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static void s390_sclp_device_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
SysBusDeviceClass *dc = SYS_BUS_DEVICE_CLASS(klass);
|
||||
|
||||
dc->init = s390_sclp_dev_init;
|
||||
}
|
||||
|
||||
static TypeInfo s390_sclp_device_info = {
|
||||
.name = TYPE_DEVICE_S390_SCLP,
|
||||
.parent = TYPE_SYS_BUS_DEVICE,
|
||||
.instance_size = sizeof(S390SCLPDevice),
|
||||
.class_init = s390_sclp_device_class_init,
|
||||
.class_size = sizeof(S390SCLPDeviceClass),
|
||||
.abstract = true,
|
||||
};
|
||||
|
||||
static void s390_sclp_register_types(void)
|
||||
{
|
||||
type_register_static(&s390_sclp_device_info);
|
||||
}
|
||||
|
||||
type_init(s390_sclp_register_types)
|
118
hw/s390x/sclp.h
Normal file
|
@ -0,0 +1,118 @@
|
|||
/*
|
||||
* SCLP Support
|
||||
*
|
||||
* Copyright IBM, Corp. 2012
|
||||
*
|
||||
* Authors:
|
||||
* Christian Borntraeger <borntraeger@de.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or (at your
|
||||
* option) any later version. See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef HW_S390_SCLP_H
|
||||
#define HW_S390_SCLP_H
|
||||
|
||||
#include <hw/sysbus.h>
|
||||
#include <hw/qdev.h>
|
||||
|
||||
/* SCLP command codes */
|
||||
#define SCLP_CMDW_READ_SCP_INFO 0x00020001
|
||||
#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
|
||||
#define SCLP_CMD_READ_EVENT_DATA 0x00770005
|
||||
#define SCLP_CMD_WRITE_EVENT_DATA 0x00760005
|
||||
#define SCLP_CMD_WRITE_EVENT_MASK 0x00780005
|
||||
|
||||
/* SCLP response codes */
|
||||
#define SCLP_RC_NORMAL_READ_COMPLETION 0x0010
|
||||
#define SCLP_RC_NORMAL_COMPLETION 0x0020
|
||||
#define SCLP_RC_INVALID_SCLP_COMMAND 0x01f0
|
||||
#define SCLP_RC_CONTAINED_EQUIPMENT_CHECK 0x0340
|
||||
#define SCLP_RC_INSUFFICIENT_SCCB_LENGTH 0x0300
|
||||
#define SCLP_RC_INVALID_FUNCTION 0x40f0
|
||||
#define SCLP_RC_NO_EVENT_BUFFERS_STORED 0x60f0
|
||||
#define SCLP_RC_INVALID_SELECTION_MASK 0x70f0
|
||||
#define SCLP_RC_INCONSISTENT_LENGTHS 0x72f0
|
||||
#define SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR 0x73f0
|
||||
#define SCLP_RC_INVALID_MASK_LENGTH 0x74f0
|
||||
|
||||
|
||||
/* Service Call Control Block (SCCB) and its elements */
|
||||
|
||||
#define SCCB_SIZE 4096
|
||||
|
||||
#define SCLP_VARIABLE_LENGTH_RESPONSE 0x80
|
||||
#define SCLP_EVENT_BUFFER_ACCEPTED 0x80
|
||||
|
||||
#define SCLP_FC_NORMAL_WRITE 0
|
||||
|
||||
/*
|
||||
* Normally packed structures are not the right thing to do, since all code
|
||||
* must take care of endianness. We can't use ldl_phys and friends for two
|
||||
* reasons, though:
|
||||
* - some of the embedded structures below the SCCB can appear multiple times
|
||||
* at different locations, so there is no fixed offset
|
||||
* - we work on a private copy of the SCCB, since there are several length
|
||||
* fields that would cause a security nightmare if we allow the guest to
|
||||
* alter the structure while we parse it. We cannot use ldl_p and friends
|
||||
* either without doing pointer arithmetic
|
||||
* So we have to double check that all users of sclp data structures use the
|
||||
* right endianness wrappers.
|
||||
*/
|
||||
typedef struct SCCBHeader {
|
||||
uint16_t length;
|
||||
uint8_t function_code;
|
||||
uint8_t control_mask[3];
|
||||
uint16_t response_code;
|
||||
} QEMU_PACKED SCCBHeader;
|
||||
|
||||
#define SCCB_DATA_LEN (SCCB_SIZE - sizeof(SCCBHeader))
|
||||
|
||||
typedef struct ReadInfo {
|
||||
SCCBHeader h;
|
||||
uint16_t rnmax;
|
||||
uint8_t rnsize;
|
||||
} QEMU_PACKED ReadInfo;
|
||||
|
||||
typedef struct SCCB {
|
||||
SCCBHeader h;
|
||||
char data[SCCB_DATA_LEN];
|
||||
} QEMU_PACKED SCCB;
|
||||
|
||||
static inline int sccb_data_len(SCCB *sccb)
|
||||
{
|
||||
return be16_to_cpu(sccb->h.length) - sizeof(sccb->h);
|
||||
}
|
||||
|
||||
#define TYPE_DEVICE_S390_SCLP "s390-sclp-device"
|
||||
#define SCLP_S390_DEVICE(obj) \
|
||||
OBJECT_CHECK(S390SCLPDevice, (obj), TYPE_DEVICE_S390_SCLP)
|
||||
#define SCLP_S390_DEVICE_CLASS(klass) \
|
||||
OBJECT_CLASS_CHECK(S390SCLPDeviceClass, (klass), \
|
||||
TYPE_DEVICE_S390_SCLP)
|
||||
#define SCLP_S390_DEVICE_GET_CLASS(obj) \
|
||||
OBJECT_GET_CLASS(S390SCLPDeviceClass, (obj), \
|
||||
TYPE_DEVICE_S390_SCLP)
|
||||
|
||||
typedef struct SCLPEventFacility SCLPEventFacility;
|
||||
|
||||
typedef struct S390SCLPDevice {
|
||||
SysBusDevice busdev;
|
||||
SCLPEventFacility *ef;
|
||||
void (*sclp_command_handler)(SCLPEventFacility *ef, SCCB *sccb,
|
||||
uint64_t code);
|
||||
bool (*event_pending)(SCLPEventFacility *ef);
|
||||
} S390SCLPDevice;
|
||||
|
||||
typedef struct S390SCLPDeviceClass {
|
||||
DeviceClass qdev;
|
||||
int (*init)(S390SCLPDevice *sdev);
|
||||
} S390SCLPDeviceClass;
|
||||
|
||||
void s390_sclp_init(void);
|
||||
void sclp_service_interrupt(uint32_t sccb);
|
||||
|
||||
#endif
|
306
hw/s390x/sclpconsole.c
Normal file
|
@ -0,0 +1,306 @@
|
|||
/*
|
||||
* SCLP event type
|
||||
* Ascii Console Data (VT220 Console)
|
||||
*
|
||||
* Copyright IBM, Corp. 2012
|
||||
*
|
||||
* Authors:
|
||||
* Heinz Graalfs <graalfs@de.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or (at your
|
||||
* option) any later version. See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <hw/qdev.h>
|
||||
#include "qemu-thread.h"
|
||||
|
||||
#include "sclp.h"
|
||||
#include "event-facility.h"
|
||||
|
||||
typedef struct ASCIIConsoleData {
|
||||
EventBufferHeader ebh;
|
||||
char data[0];
|
||||
} QEMU_PACKED ASCIIConsoleData;
|
||||
|
||||
/* max size for ASCII data in 4K SCCB page */
|
||||
#define SIZE_BUFFER_VT220 4080
|
||||
|
||||
typedef struct SCLPConsole {
|
||||
SCLPEvent event;
|
||||
CharDriverState *chr;
|
||||
/* io vector */
|
||||
uint8_t *iov; /* iov buffer pointer */
|
||||
uint8_t *iov_sclp; /* pointer to SCLP read offset */
|
||||
uint8_t *iov_bs; /* pointer to byte stream read offset */
|
||||
uint32_t iov_data_len; /* length of byte stream in buffer */
|
||||
uint32_t iov_sclp_rest; /* length of byte stream not read via SCLP */
|
||||
qemu_irq irq_read_vt220;
|
||||
} SCLPConsole;
|
||||
|
||||
/* character layer call-back functions */
|
||||
|
||||
/* Return number of bytes that fit into iov buffer */
|
||||
static int chr_can_read(void *opaque)
|
||||
{
|
||||
int can_read;
|
||||
SCLPConsole *scon = opaque;
|
||||
|
||||
can_read = SIZE_BUFFER_VT220 - scon->iov_data_len;
|
||||
|
||||
return can_read;
|
||||
}
|
||||
|
||||
/* Receive n bytes from character layer, save in iov buffer,
|
||||
* and set event pending */
|
||||
static void receive_from_chr_layer(SCLPConsole *scon, const uint8_t *buf,
|
||||
int size)
|
||||
{
|
||||
assert(scon->iov);
|
||||
|
||||
/* read data must fit into current buffer */
|
||||
assert(size <= SIZE_BUFFER_VT220 - scon->iov_data_len);
|
||||
|
||||
/* put byte-stream from character layer into buffer */
|
||||
memcpy(scon->iov_bs, buf, size);
|
||||
scon->iov_data_len += size;
|
||||
scon->iov_sclp_rest += size;
|
||||
scon->iov_bs += size;
|
||||
scon->event.event_pending = true;
|
||||
}
|
||||
|
||||
/* Send data from a char device over to the guest */
|
||||
static void chr_read(void *opaque, const uint8_t *buf, int size)
|
||||
{
|
||||
SCLPConsole *scon = opaque;
|
||||
|
||||
assert(scon);
|
||||
|
||||
receive_from_chr_layer(scon, buf, size);
|
||||
/* trigger SCLP read operation */
|
||||
qemu_irq_raise(scon->irq_read_vt220);
|
||||
}
|
||||
|
||||
static void chr_event(void *opaque, int event)
|
||||
{
|
||||
SCLPConsole *scon = opaque;
|
||||
|
||||
switch (event) {
|
||||
case CHR_EVENT_OPENED:
|
||||
if (!scon->iov) {
|
||||
scon->iov = g_malloc0(SIZE_BUFFER_VT220);
|
||||
scon->iov_sclp = scon->iov;
|
||||
scon->iov_bs = scon->iov;
|
||||
scon->iov_data_len = 0;
|
||||
scon->iov_sclp_rest = 0;
|
||||
}
|
||||
break;
|
||||
case CHR_EVENT_CLOSED:
|
||||
if (scon->iov) {
|
||||
g_free(scon->iov);
|
||||
scon->iov = NULL;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* functions to be called by event facility */
|
||||
|
||||
static int event_type(void)
|
||||
{
|
||||
return SCLP_EVENT_ASCII_CONSOLE_DATA;
|
||||
}
|
||||
|
||||
static unsigned int send_mask(void)
|
||||
{
|
||||
return SCLP_EVENT_MASK_MSG_ASCII;
|
||||
}
|
||||
|
||||
static unsigned int receive_mask(void)
|
||||
{
|
||||
return SCLP_EVENT_MASK_MSG_ASCII;
|
||||
}
|
||||
|
||||
/* triggered by SCLP's read_event_data -
|
||||
* copy console data byte-stream into provided (SCLP) buffer
|
||||
*/
|
||||
static void get_console_data(SCLPEvent *event, uint8_t *buf, size_t *size,
|
||||
int avail)
|
||||
{
|
||||
SCLPConsole *cons = DO_UPCAST(SCLPConsole, event, event);
|
||||
|
||||
/* the first byte is hex 0, saying an ASCII string follows */
|
||||
*buf++ = '\0';
|
||||
avail--;
|
||||
/* if all data fit into provided SCLP buffer */
|
||||
if (avail >= cons->iov_sclp_rest) {
|
||||
/* copy character byte-stream to SCLP buffer */
|
||||
memcpy(buf, cons->iov_sclp, cons->iov_sclp_rest);
|
||||
*size = cons->iov_sclp_rest + 1;
|
||||
cons->iov_sclp = cons->iov;
|
||||
cons->iov_bs = cons->iov;
|
||||
cons->iov_data_len = 0;
|
||||
cons->iov_sclp_rest = 0;
|
||||
event->event_pending = false;
|
||||
/* data provided and no more data pending */
|
||||
} else {
|
||||
/* if provided buffer is too small, just copy part */
|
||||
memcpy(buf, cons->iov_sclp, avail);
|
||||
*size = avail + 1;
|
||||
cons->iov_sclp_rest -= avail;
|
||||
cons->iov_sclp += avail;
|
||||
/* more data pending */
|
||||
}
|
||||
}
|
||||
|
||||
static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
|
||||
int *slen)
|
||||
{
|
||||
int avail;
|
||||
size_t src_len;
|
||||
uint8_t *to;
|
||||
ASCIIConsoleData *acd = (ASCIIConsoleData *) evt_buf_hdr;
|
||||
|
||||
if (!event->event_pending) {
|
||||
/* no data pending */
|
||||
return 0;
|
||||
}
|
||||
|
||||
to = (uint8_t *)&acd->data;
|
||||
avail = *slen - sizeof(ASCIIConsoleData);
|
||||
get_console_data(event, to, &src_len, avail);
|
||||
|
||||
acd->ebh.length = cpu_to_be16(sizeof(ASCIIConsoleData) + src_len);
|
||||
acd->ebh.type = SCLP_EVENT_ASCII_CONSOLE_DATA;
|
||||
acd->ebh.flags |= SCLP_EVENT_BUFFER_ACCEPTED;
|
||||
*slen = avail - src_len;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* triggered by SCLP's write_event_data
|
||||
* - write console data into character layer
|
||||
* returns < 0 if an error occurred
|
||||
*/
|
||||
static ssize_t write_console_data(SCLPEvent *event, const uint8_t *buf,
|
||||
size_t len)
|
||||
{
|
||||
ssize_t ret = 0;
|
||||
const uint8_t *iov_offset;
|
||||
SCLPConsole *scon = DO_UPCAST(SCLPConsole, event, event);
|
||||
|
||||
if (!scon->chr) {
|
||||
/* If there's no backend, we can just say we consumed all data. */
|
||||
return len;
|
||||
}
|
||||
|
||||
iov_offset = buf;
|
||||
while (len > 0) {
|
||||
ret = qemu_chr_fe_write(scon->chr, buf, len);
|
||||
if (ret == 0) {
|
||||
/* a pty doesn't seem to be connected - no error */
|
||||
len = 0;
|
||||
} else if (ret == -EAGAIN || (ret > 0 && ret < len)) {
|
||||
len -= ret;
|
||||
iov_offset += ret;
|
||||
} else {
|
||||
len = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int write_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr)
|
||||
{
|
||||
int rc;
|
||||
int length;
|
||||
ssize_t written;
|
||||
ASCIIConsoleData *acd = (ASCIIConsoleData *) evt_buf_hdr;
|
||||
|
||||
length = be16_to_cpu(evt_buf_hdr->length) - sizeof(EventBufferHeader);
|
||||
written = write_console_data(event, (uint8_t *)acd->data, length);
|
||||
|
||||
rc = SCLP_RC_NORMAL_COMPLETION;
|
||||
/* set event buffer accepted flag */
|
||||
evt_buf_hdr->flags |= SCLP_EVENT_BUFFER_ACCEPTED;
|
||||
|
||||
/* written will be zero if a pty is not connected - don't treat as error */
|
||||
if (written < 0) {
|
||||
/* event buffer not accepted due to error in character layer */
|
||||
evt_buf_hdr->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED);
|
||||
rc = SCLP_RC_CONTAINED_EQUIPMENT_CHECK;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void trigger_ascii_console_data(void *env, int n, int level)
|
||||
{
|
||||
sclp_service_interrupt(0);
|
||||
}
|
||||
|
||||
/* qemu object creation and initialization functions */
|
||||
|
||||
/* tell the character layer our call-back functions */
|
||||
static int console_init(SCLPEvent *event)
|
||||
{
|
||||
static bool console_available;
|
||||
|
||||
SCLPConsole *scon = DO_UPCAST(SCLPConsole, event, event);
|
||||
|
||||
if (console_available) {
|
||||
error_report("Multiple VT220 operator consoles are not supported");
|
||||
return -1;
|
||||
}
|
||||
console_available = true;
|
||||
event->event_type = SCLP_EVENT_ASCII_CONSOLE_DATA;
|
||||
if (scon->chr) {
|
||||
qemu_chr_add_handlers(scon->chr, chr_can_read,
|
||||
chr_read, chr_event, scon);
|
||||
}
|
||||
scon->irq_read_vt220 = *qemu_allocate_irqs(trigger_ascii_console_data,
|
||||
NULL, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int console_exit(SCLPEvent *event)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static Property console_properties[] = {
|
||||
DEFINE_PROP_CHR("chardev", SCLPConsole, chr),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static void console_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
SCLPEventClass *ec = SCLP_EVENT_CLASS(klass);
|
||||
|
||||
dc->props = console_properties;
|
||||
ec->init = console_init;
|
||||
ec->exit = console_exit;
|
||||
ec->get_send_mask = send_mask;
|
||||
ec->get_receive_mask = receive_mask;
|
||||
ec->event_type = event_type;
|
||||
ec->read_event_data = read_event_data;
|
||||
ec->write_event_data = write_event_data;
|
||||
}
|
||||
|
||||
static TypeInfo sclp_console_info = {
|
||||
.name = "sclpconsole",
|
||||
.parent = TYPE_SCLP_EVENT,
|
||||
.instance_size = sizeof(SCLPConsole),
|
||||
.class_init = console_class_init,
|
||||
.class_size = sizeof(SCLPEventClass),
|
||||
};
|
||||
|
||||
static void register_types(void)
|
||||
{
|
||||
type_register_static(&sclp_console_info);
|
||||
}
|
||||
|
||||
type_init(register_types)
|
123
hw/s390x/sclpquiesce.c
Normal file
|
@ -0,0 +1,123 @@
|
|||
/*
|
||||
* SCLP event type
|
||||
* Signal Quiesce - trigger system powerdown request
|
||||
*
|
||||
* Copyright IBM, Corp. 2012
|
||||
*
|
||||
* Authors:
|
||||
* Heinz Graalfs <graalfs@de.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or (at your
|
||||
* option) any later version. See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
#include <hw/qdev.h>
|
||||
#include "sysemu.h"
|
||||
#include "sclp.h"
|
||||
#include "event-facility.h"
|
||||
|
||||
typedef struct SignalQuiesce {
|
||||
EventBufferHeader ebh;
|
||||
uint16_t timeout;
|
||||
uint8_t unit;
|
||||
} QEMU_PACKED SignalQuiesce;
|
||||
|
||||
static int event_type(void)
|
||||
{
|
||||
return SCLP_EVENT_SIGNAL_QUIESCE;
|
||||
}
|
||||
|
||||
static unsigned int send_mask(void)
|
||||
{
|
||||
return SCLP_EVENT_MASK_SIGNAL_QUIESCE;
|
||||
}
|
||||
|
||||
static unsigned int receive_mask(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
|
||||
int *slen)
|
||||
{
|
||||
SignalQuiesce *sq = (SignalQuiesce *) evt_buf_hdr;
|
||||
|
||||
if (*slen < sizeof(SignalQuiesce)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!event->event_pending) {
|
||||
return 0;
|
||||
}
|
||||
event->event_pending = false;
|
||||
|
||||
sq->ebh.length = cpu_to_be16(sizeof(SignalQuiesce));
|
||||
sq->ebh.type = SCLP_EVENT_SIGNAL_QUIESCE;
|
||||
sq->ebh.flags |= SCLP_EVENT_BUFFER_ACCEPTED;
|
||||
/*
|
||||
* system_powerdown does not have a timeout. Fortunately the
|
||||
* timeout value is currently ignored by Linux, anyway
|
||||
*/
|
||||
sq->timeout = cpu_to_be16(0);
|
||||
sq->unit = cpu_to_be16(0);
|
||||
*slen -= sizeof(SignalQuiesce);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
typedef struct QuiesceNotifier QuiesceNotifier;
|
||||
|
||||
static struct QuiesceNotifier {
|
||||
Notifier notifier;
|
||||
SCLPEvent *event;
|
||||
} qn;
|
||||
|
||||
static void quiesce_powerdown_req(Notifier *n, void *opaque)
|
||||
{
|
||||
QuiesceNotifier *qn = container_of(n, QuiesceNotifier, notifier);
|
||||
SCLPEvent *event = qn->event;
|
||||
|
||||
event->event_pending = true;
|
||||
/* trigger SCLP read operation */
|
||||
sclp_service_interrupt(0);
|
||||
}
|
||||
|
||||
static int quiesce_init(SCLPEvent *event)
|
||||
{
|
||||
event->event_type = SCLP_EVENT_SIGNAL_QUIESCE;
|
||||
|
||||
qn.notifier.notify = quiesce_powerdown_req;
|
||||
qn.event = event;
|
||||
|
||||
qemu_register_powerdown_notifier(&qn.notifier);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void quiesce_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
SCLPEventClass *k = SCLP_EVENT_CLASS(klass);
|
||||
|
||||
k->init = quiesce_init;
|
||||
|
||||
k->get_send_mask = send_mask;
|
||||
k->get_receive_mask = receive_mask;
|
||||
k->event_type = event_type;
|
||||
k->read_event_data = read_event_data;
|
||||
k->write_event_data = NULL;
|
||||
}
|
||||
|
||||
static TypeInfo sclp_quiesce_info = {
|
||||
.name = "sclpquiesce",
|
||||
.parent = TYPE_SCLP_EVENT,
|
||||
.instance_size = sizeof(SCLPEvent),
|
||||
.class_init = quiesce_class_init,
|
||||
.class_size = sizeof(SCLPEventClass),
|
||||
};
|
||||
|
||||
static void register_types(void)
|
||||
{
|
||||
type_register_static(&sclp_quiesce_info);
|
||||
}
|
||||
|
||||
type_init(register_types)
|
30
hw/serial.c
|
@ -26,6 +26,7 @@
|
|||
#include "serial.h"
|
||||
#include "qemu-char.h"
|
||||
#include "qemu-timer.h"
|
||||
#include "exec-memory.h"
|
||||
|
||||
//#define DEBUG_SERIAL
|
||||
|
||||
|
@ -305,7 +306,8 @@ static void serial_xmit(void *opaque)
|
|||
}
|
||||
|
||||
|
||||
static void serial_ioport_write(void *opaque, uint32_t addr, uint32_t val)
|
||||
static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
|
||||
unsigned size)
|
||||
{
|
||||
SerialState *s = opaque;
|
||||
|
||||
|
@ -451,7 +453,7 @@ static void serial_ioport_write(void *opaque, uint32_t addr, uint32_t val)
|
|||
}
|
||||
}
|
||||
|
||||
static uint32_t serial_ioport_read(void *opaque, uint32_t addr)
|
||||
static uint64_t serial_ioport_read(void *opaque, hwaddr addr, unsigned size)
|
||||
{
|
||||
SerialState *s = opaque;
|
||||
uint32_t ret;
|
||||
|
@ -620,7 +622,7 @@ static int serial_post_load(void *opaque, int version_id)
|
|||
s->fcr_vmstate = 0;
|
||||
}
|
||||
/* Initialize fcr via setter to perform essential side-effects */
|
||||
serial_ioport_write(s, 0x02, s->fcr_vmstate);
|
||||
serial_ioport_write(s, 0x02, s->fcr_vmstate, 1);
|
||||
serial_update_parameters(s);
|
||||
return 0;
|
||||
}
|
||||
|
@ -705,13 +707,14 @@ void serial_set_frequency(SerialState *s, uint32_t frequency)
|
|||
serial_update_parameters(s);
|
||||
}
|
||||
|
||||
static const MemoryRegionPortio serial_portio[] = {
|
||||
{ 0, 8, 1, .read = serial_ioport_read, .write = serial_ioport_write },
|
||||
PORTIO_END_OF_LIST()
|
||||
};
|
||||
|
||||
const MemoryRegionOps serial_io_ops = {
|
||||
.old_portio = serial_portio
|
||||
.read = serial_ioport_read,
|
||||
.write = serial_ioport_write,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 1,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
SerialState *serial_init(int base, qemu_irq irq, int baudbase,
|
||||
|
@ -728,8 +731,9 @@ SerialState *serial_init(int base, qemu_irq irq, int baudbase,
|
|||
|
||||
vmstate_register(NULL, base, &vmstate_serial, s);
|
||||
|
||||
register_ioport_write(base, 8, 1, serial_ioport_write, s);
|
||||
register_ioport_read(base, 8, 1, serial_ioport_read, s);
|
||||
memory_region_init_io(&s->io, &serial_io_ops, s, "serial", 8);
|
||||
memory_region_add_subregion(get_system_io(), base, &s->io);
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
|
@ -738,7 +742,7 @@ static uint64_t serial_mm_read(void *opaque, hwaddr addr,
|
|||
unsigned size)
|
||||
{
|
||||
SerialState *s = opaque;
|
||||
return serial_ioport_read(s, addr >> s->it_shift);
|
||||
return serial_ioport_read(s, addr >> s->it_shift, 1);
|
||||
}
|
||||
|
||||
static void serial_mm_write(void *opaque, hwaddr addr,
|
||||
|
@ -746,7 +750,7 @@ static void serial_mm_write(void *opaque, hwaddr addr,
|
|||
{
|
||||
SerialState *s = opaque;
|
||||
value &= ~0u >> (32 - (size * 8));
|
||||
serial_ioport_write(s, addr >> s->it_shift, value);
|
||||
serial_ioport_write(s, addr >> s->it_shift, value, 1);
|
||||
}
|
||||
|
||||
static const MemoryRegionOps serial_mm_ops[3] = {
|
||||
|
|
16
hw/spapr.c
|
@ -232,7 +232,8 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
|
|||
hwaddr initrd_size,
|
||||
hwaddr kernel_size,
|
||||
const char *boot_device,
|
||||
const char *kernel_cmdline)
|
||||
const char *kernel_cmdline,
|
||||
uint32_t epow_irq)
|
||||
{
|
||||
void *fdt;
|
||||
CPUPPCState *env;
|
||||
|
@ -403,6 +404,8 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
|
|||
_FDT((fdt_property(fdt, "ibm,associativity-reference-points",
|
||||
refpoints, sizeof(refpoints))));
|
||||
|
||||
_FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX)));
|
||||
|
||||
_FDT((fdt_end_node(fdt)));
|
||||
|
||||
/* interrupt controller */
|
||||
|
@ -433,6 +436,9 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
|
|||
|
||||
_FDT((fdt_end_node(fdt)));
|
||||
|
||||
/* event-sources */
|
||||
spapr_events_fdt_skel(fdt, epow_irq);
|
||||
|
||||
_FDT((fdt_end_node(fdt))); /* close root node */
|
||||
_FDT((fdt_finish(fdt)));
|
||||
|
||||
|
@ -795,6 +801,9 @@ static void ppc_spapr_init(QEMUMachineInitArgs *args)
|
|||
spapr->icp = xics_system_init(XICS_IRQS);
|
||||
spapr->next_irq = 16;
|
||||
|
||||
/* Set up EPOW events infrastructure */
|
||||
spapr_events_init(spapr);
|
||||
|
||||
/* Set up IOMMU */
|
||||
spapr_iommu_init();
|
||||
|
||||
|
@ -840,7 +849,7 @@ static void ppc_spapr_init(QEMUMachineInitArgs *args)
|
|||
spapr->has_graphics = true;
|
||||
}
|
||||
|
||||
if (usb_enabled) {
|
||||
if (usb_enabled(spapr->has_graphics)) {
|
||||
pci_create_simple(phb->bus, -1, "pci-ohci");
|
||||
if (spapr->has_graphics) {
|
||||
usbdevice_create("keyboard");
|
||||
|
@ -903,7 +912,8 @@ static void ppc_spapr_init(QEMUMachineInitArgs *args)
|
|||
spapr->fdt_skel = spapr_create_fdt_skel(cpu_model,
|
||||
initrd_base, initrd_size,
|
||||
kernel_size,
|
||||
boot_device, kernel_cmdline);
|
||||
boot_device, kernel_cmdline,
|
||||
spapr->epow_irq);
|
||||
assert(spapr->fdt_skel != NULL);
|
||||
}
|
||||
|
||||
|
|
|
@ -26,6 +26,9 @@ typedef struct sPAPREnvironment {
|
|||
int rtc_offset;
|
||||
char *cpu_model;
|
||||
bool has_graphics;
|
||||
|
||||
uint32_t epow_irq;
|
||||
Notifier epow_notifier;
|
||||
} sPAPREnvironment;
|
||||
|
||||
#define H_SUCCESS 0
|
||||
|
@ -335,7 +338,12 @@ typedef struct sPAPRTCE {
|
|||
#define SPAPR_VIO_BASE_LIOBN 0x00000000
|
||||
#define SPAPR_PCI_BASE_LIOBN 0x80000000
|
||||
|
||||
#define RTAS_ERROR_LOG_MAX 2048
|
||||
|
||||
|
||||
void spapr_iommu_init(void);
|
||||
void spapr_events_init(sPAPREnvironment *spapr);
|
||||
void spapr_events_fdt_skel(void *fdt, uint32_t epow_irq);
|
||||
DMAContext *spapr_tce_new_dma_context(uint32_t liobn, size_t window_size);
|
||||
void spapr_tce_free(DMAContext *dma);
|
||||
void spapr_tce_reset(DMAContext *dma);
|
||||
|
|
321
hw/spapr_events.c
Normal file
|
@ -0,0 +1,321 @@
|
|||
/*
|
||||
* QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
|
||||
*
|
||||
* RTAS events handling
|
||||
*
|
||||
* Copyright (c) 2012 David Gibson, IBM Corporation.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include "cpu.h"
|
||||
#include "sysemu.h"
|
||||
#include "qemu-char.h"
|
||||
#include "hw/qdev.h"
|
||||
#include "device_tree.h"
|
||||
|
||||
#include "hw/spapr.h"
|
||||
#include "hw/spapr_vio.h"
|
||||
|
||||
#include <libfdt.h>
|
||||
|
||||
struct rtas_error_log {
|
||||
uint32_t summary;
|
||||
#define RTAS_LOG_VERSION_MASK 0xff000000
|
||||
#define RTAS_LOG_VERSION_6 0x06000000
|
||||
#define RTAS_LOG_SEVERITY_MASK 0x00e00000
|
||||
#define RTAS_LOG_SEVERITY_ALREADY_REPORTED 0x00c00000
|
||||
#define RTAS_LOG_SEVERITY_FATAL 0x00a00000
|
||||
#define RTAS_LOG_SEVERITY_ERROR 0x00800000
|
||||
#define RTAS_LOG_SEVERITY_ERROR_SYNC 0x00600000
|
||||
#define RTAS_LOG_SEVERITY_WARNING 0x00400000
|
||||
#define RTAS_LOG_SEVERITY_EVENT 0x00200000
|
||||
#define RTAS_LOG_SEVERITY_NO_ERROR 0x00000000
|
||||
#define RTAS_LOG_DISPOSITION_MASK 0x00180000
|
||||
#define RTAS_LOG_DISPOSITION_FULLY_RECOVERED 0x00000000
|
||||
#define RTAS_LOG_DISPOSITION_LIMITED_RECOVERY 0x00080000
|
||||
#define RTAS_LOG_DISPOSITION_NOT_RECOVERED 0x00100000
|
||||
#define RTAS_LOG_OPTIONAL_PART_PRESENT 0x00040000
|
||||
#define RTAS_LOG_INITIATOR_MASK 0x0000f000
|
||||
#define RTAS_LOG_INITIATOR_UNKNOWN 0x00000000
|
||||
#define RTAS_LOG_INITIATOR_CPU 0x00001000
|
||||
#define RTAS_LOG_INITIATOR_PCI 0x00002000
|
||||
#define RTAS_LOG_INITIATOR_MEMORY 0x00004000
|
||||
#define RTAS_LOG_INITIATOR_HOTPLUG 0x00006000
|
||||
#define RTAS_LOG_TARGET_MASK 0x00000f00
|
||||
#define RTAS_LOG_TARGET_UNKNOWN 0x00000000
|
||||
#define RTAS_LOG_TARGET_CPU 0x00000100
|
||||
#define RTAS_LOG_TARGET_PCI 0x00000200
|
||||
#define RTAS_LOG_TARGET_MEMORY 0x00000400
|
||||
#define RTAS_LOG_TARGET_HOTPLUG 0x00000600
|
||||
#define RTAS_LOG_TYPE_MASK 0x000000ff
|
||||
#define RTAS_LOG_TYPE_OTHER 0x00000000
|
||||
#define RTAS_LOG_TYPE_RETRY 0x00000001
|
||||
#define RTAS_LOG_TYPE_TCE_ERR 0x00000002
|
||||
#define RTAS_LOG_TYPE_INTERN_DEV_FAIL 0x00000003
|
||||
#define RTAS_LOG_TYPE_TIMEOUT 0x00000004
|
||||
#define RTAS_LOG_TYPE_DATA_PARITY 0x00000005
|
||||
#define RTAS_LOG_TYPE_ADDR_PARITY 0x00000006
|
||||
#define RTAS_LOG_TYPE_CACHE_PARITY 0x00000007
|
||||
#define RTAS_LOG_TYPE_ADDR_INVALID 0x00000008
|
||||
#define RTAS_LOG_TYPE_ECC_UNCORR 0x00000009
|
||||
#define RTAS_LOG_TYPE_ECC_CORR 0x0000000a
|
||||
#define RTAS_LOG_TYPE_EPOW 0x00000040
|
||||
uint32_t extended_length;
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct rtas_event_log_v6 {
|
||||
uint8_t b0;
|
||||
#define RTAS_LOG_V6_B0_VALID 0x80
|
||||
#define RTAS_LOG_V6_B0_UNRECOVERABLE_ERROR 0x40
|
||||
#define RTAS_LOG_V6_B0_RECOVERABLE_ERROR 0x20
|
||||
#define RTAS_LOG_V6_B0_DEGRADED_OPERATION 0x10
|
||||
#define RTAS_LOG_V6_B0_PREDICTIVE_ERROR 0x08
|
||||
#define RTAS_LOG_V6_B0_NEW_LOG 0x04
|
||||
#define RTAS_LOG_V6_B0_BIGENDIAN 0x02
|
||||
uint8_t _resv1;
|
||||
uint8_t b2;
|
||||
#define RTAS_LOG_V6_B2_POWERPC_FORMAT 0x80
|
||||
#define RTAS_LOG_V6_B2_LOG_FORMAT_MASK 0x0f
|
||||
#define RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT 0x0e
|
||||
uint8_t _resv2[9];
|
||||
uint32_t company;
|
||||
#define RTAS_LOG_V6_COMPANY_IBM 0x49424d00 /* IBM<null> */
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct rtas_event_log_v6_section_header {
|
||||
uint16_t section_id;
|
||||
uint16_t section_length;
|
||||
uint8_t section_version;
|
||||
uint8_t section_subtype;
|
||||
uint16_t creator_component_id;
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct rtas_event_log_v6_maina {
|
||||
#define RTAS_LOG_V6_SECTION_ID_MAINA 0x5048 /* PH */
|
||||
struct rtas_event_log_v6_section_header hdr;
|
||||
uint32_t creation_date; /* BCD: YYYYMMDD */
|
||||
uint32_t creation_time; /* BCD: HHMMSS00 */
|
||||
uint8_t _platform1[8];
|
||||
char creator_id;
|
||||
uint8_t _resv1[2];
|
||||
uint8_t section_count;
|
||||
uint8_t _resv2[4];
|
||||
uint8_t _platform2[8];
|
||||
uint32_t plid;
|
||||
uint8_t _platform3[4];
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct rtas_event_log_v6_mainb {
|
||||
#define RTAS_LOG_V6_SECTION_ID_MAINB 0x5548 /* UH */
|
||||
struct rtas_event_log_v6_section_header hdr;
|
||||
uint8_t subsystem_id;
|
||||
uint8_t _platform1;
|
||||
uint8_t event_severity;
|
||||
uint8_t event_subtype;
|
||||
uint8_t _platform2[4];
|
||||
uint8_t _resv1[2];
|
||||
uint16_t action_flags;
|
||||
uint8_t _resv2[4];
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct rtas_event_log_v6_epow {
|
||||
#define RTAS_LOG_V6_SECTION_ID_EPOW 0x4550 /* EP */
|
||||
struct rtas_event_log_v6_section_header hdr;
|
||||
uint8_t sensor_value;
|
||||
#define RTAS_LOG_V6_EPOW_ACTION_RESET 0
|
||||
#define RTAS_LOG_V6_EPOW_ACTION_WARN_COOLING 1
|
||||
#define RTAS_LOG_V6_EPOW_ACTION_WARN_POWER 2
|
||||
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN 3
|
||||
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_HALT 4
|
||||
#define RTAS_LOG_V6_EPOW_ACTION_MAIN_ENCLOSURE 5
|
||||
#define RTAS_LOG_V6_EPOW_ACTION_POWER_OFF 7
|
||||
uint8_t event_modifier;
|
||||
#define RTAS_LOG_V6_EPOW_MODIFIER_NORMAL 1
|
||||
#define RTAS_LOG_V6_EPOW_MODIFIER_ON_UPS 2
|
||||
#define RTAS_LOG_V6_EPOW_MODIFIER_CRITICAL 3
|
||||
#define RTAS_LOG_V6_EPOW_MODIFIER_TEMPERATURE 4
|
||||
uint8_t extended_modifier;
|
||||
#define RTAS_LOG_V6_EPOW_XMODIFIER_SYSTEM_WIDE 0
|
||||
#define RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC 1
|
||||
uint8_t _resv;
|
||||
uint64_t reason_code;
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct epow_log_full {
|
||||
struct rtas_error_log hdr;
|
||||
struct rtas_event_log_v6 v6hdr;
|
||||
struct rtas_event_log_v6_maina maina;
|
||||
struct rtas_event_log_v6_mainb mainb;
|
||||
struct rtas_event_log_v6_epow epow;
|
||||
} QEMU_PACKED;
|
||||
|
||||
#define EVENT_MASK_INTERNAL_ERRORS 0x80000000
|
||||
#define EVENT_MASK_EPOW 0x40000000
|
||||
#define EVENT_MASK_HOTPLUG 0x10000000
|
||||
#define EVENT_MASK_IO 0x08000000
|
||||
|
||||
#define _FDT(exp) \
|
||||
do { \
|
||||
int ret = (exp); \
|
||||
if (ret < 0) { \
|
||||
fprintf(stderr, "qemu: error creating device tree: %s: %s\n", \
|
||||
#exp, fdt_strerror(ret)); \
|
||||
exit(1); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
void spapr_events_fdt_skel(void *fdt, uint32_t epow_irq)
|
||||
{
|
||||
uint32_t epow_irq_ranges[] = {cpu_to_be32(epow_irq), cpu_to_be32(1)};
|
||||
uint32_t epow_interrupts[] = {cpu_to_be32(epow_irq), 0};
|
||||
|
||||
_FDT((fdt_begin_node(fdt, "event-sources")));
|
||||
|
||||
_FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
|
||||
_FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
|
||||
_FDT((fdt_property(fdt, "interrupt-ranges",
|
||||
epow_irq_ranges, sizeof(epow_irq_ranges))));
|
||||
|
||||
_FDT((fdt_begin_node(fdt, "epow-events")));
|
||||
_FDT((fdt_property(fdt, "interrupts",
|
||||
epow_interrupts, sizeof(epow_interrupts))));
|
||||
_FDT((fdt_end_node(fdt)));
|
||||
|
||||
_FDT((fdt_end_node(fdt)));
|
||||
}
|
||||
|
||||
static struct epow_log_full *pending_epow;
|
||||
static uint32_t next_plid;
|
||||
|
||||
static void spapr_powerdown_req(Notifier *n, void *opaque)
|
||||
{
|
||||
sPAPREnvironment *spapr = container_of(n, sPAPREnvironment, epow_notifier);
|
||||
struct rtas_error_log *hdr;
|
||||
struct rtas_event_log_v6 *v6hdr;
|
||||
struct rtas_event_log_v6_maina *maina;
|
||||
struct rtas_event_log_v6_mainb *mainb;
|
||||
struct rtas_event_log_v6_epow *epow;
|
||||
struct tm tm;
|
||||
int year;
|
||||
|
||||
if (pending_epow) {
|
||||
/* For now, we just throw away earlier events if two come
|
||||
* along before any are consumed. This is sufficient for our
|
||||
* powerdown messages, but we'll need more if we do more
|
||||
* general error/event logging */
|
||||
g_free(pending_epow);
|
||||
}
|
||||
pending_epow = g_malloc0(sizeof(*pending_epow));
|
||||
hdr = &pending_epow->hdr;
|
||||
v6hdr = &pending_epow->v6hdr;
|
||||
maina = &pending_epow->maina;
|
||||
mainb = &pending_epow->mainb;
|
||||
epow = &pending_epow->epow;
|
||||
|
||||
hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
|
||||
| RTAS_LOG_SEVERITY_EVENT
|
||||
| RTAS_LOG_DISPOSITION_NOT_RECOVERED
|
||||
| RTAS_LOG_OPTIONAL_PART_PRESENT
|
||||
| RTAS_LOG_TYPE_EPOW);
|
||||
hdr->extended_length = cpu_to_be32(sizeof(*pending_epow)
|
||||
- sizeof(pending_epow->hdr));
|
||||
|
||||
v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
|
||||
| RTAS_LOG_V6_B0_BIGENDIAN;
|
||||
v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
|
||||
| RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
|
||||
v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
|
||||
|
||||
maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
|
||||
maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
|
||||
/* FIXME: section version, subtype and creator id? */
|
||||
qemu_get_timedate(&tm, spapr->rtc_offset);
|
||||
year = tm.tm_year + 1900;
|
||||
maina->creation_date = cpu_to_be32((to_bcd(year / 100) << 24)
|
||||
| (to_bcd(year % 100) << 16)
|
||||
| (to_bcd(tm.tm_mon + 1) << 8)
|
||||
| to_bcd(tm.tm_mday));
|
||||
maina->creation_time = cpu_to_be32((to_bcd(tm.tm_hour) << 24)
|
||||
| (to_bcd(tm.tm_min) << 16)
|
||||
| (to_bcd(tm.tm_sec) << 8));
|
||||
maina->creator_id = 'H'; /* Hypervisor */
|
||||
maina->section_count = 3; /* Main-A, Main-B and EPOW */
|
||||
maina->plid = next_plid++;
|
||||
|
||||
mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
|
||||
mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
|
||||
/* FIXME: section version, subtype and creator id? */
|
||||
mainb->subsystem_id = 0xa0; /* External environment */
|
||||
mainb->event_severity = 0x00; /* Informational / non-error */
|
||||
mainb->event_subtype = 0xd0; /* Normal shutdown */
|
||||
|
||||
epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW);
|
||||
epow->hdr.section_length = cpu_to_be16(sizeof(*epow));
|
||||
epow->hdr.section_version = 2; /* includes extended modifier */
|
||||
/* FIXME: section subtype and creator id? */
|
||||
epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN;
|
||||
epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
|
||||
epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;
|
||||
|
||||
qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->epow_irq));
|
||||
}
|
||||
|
||||
static void check_exception(sPAPREnvironment *spapr,
|
||||
uint32_t token, uint32_t nargs,
|
||||
target_ulong args,
|
||||
uint32_t nret, target_ulong rets)
|
||||
{
|
||||
uint32_t mask, buf, len;
|
||||
uint64_t xinfo;
|
||||
|
||||
if ((nargs < 6) || (nargs > 7) || nret != 1) {
|
||||
rtas_st(rets, 0, -3);
|
||||
return;
|
||||
}
|
||||
|
||||
xinfo = rtas_ld(args, 1);
|
||||
mask = rtas_ld(args, 2);
|
||||
buf = rtas_ld(args, 4);
|
||||
len = rtas_ld(args, 5);
|
||||
if (nargs == 7) {
|
||||
xinfo |= (uint64_t)rtas_ld(args, 6) << 32;
|
||||
}
|
||||
|
||||
if ((mask & EVENT_MASK_EPOW) && pending_epow) {
|
||||
if (sizeof(*pending_epow) < len) {
|
||||
len = sizeof(*pending_epow);
|
||||
}
|
||||
|
||||
cpu_physical_memory_write(buf, pending_epow, len);
|
||||
g_free(pending_epow);
|
||||
pending_epow = NULL;
|
||||
rtas_st(rets, 0, 0);
|
||||
} else {
|
||||
rtas_st(rets, 0, 1);
|
||||
}
|
||||
}
|
||||
|
||||
void spapr_events_init(sPAPREnvironment *spapr)
|
||||
{
|
||||
spapr->epow_irq = spapr_allocate_msi(0);
|
||||
spapr->epow_notifier.notify = spapr_powerdown_req;
|
||||
qemu_register_powerdown_notifier(&spapr->epow_notifier);
|
||||
spapr_rtas_register("check-exception", check_exception);
|
||||
}
|
|
@ -366,26 +366,26 @@ static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
|
|||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
env->vpa = vpa;
|
||||
env->vpa_addr = vpa;
|
||||
|
||||
tmp = ldub_phys(env->vpa + VPA_SHARED_PROC_OFFSET);
|
||||
tmp = ldub_phys(env->vpa_addr + VPA_SHARED_PROC_OFFSET);
|
||||
tmp |= VPA_SHARED_PROC_VAL;
|
||||
stb_phys(env->vpa + VPA_SHARED_PROC_OFFSET, tmp);
|
||||
stb_phys(env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
|
||||
{
|
||||
if (env->slb_shadow) {
|
||||
if (env->slb_shadow_addr) {
|
||||
return H_RESOURCE;
|
||||
}
|
||||
|
||||
if (env->dispatch_trace_log) {
|
||||
if (env->dtl_addr) {
|
||||
return H_RESOURCE;
|
||||
}
|
||||
|
||||
env->vpa = 0;
|
||||
env->vpa_addr = 0;
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -407,18 +407,20 @@ static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
|
|||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
if (!env->vpa) {
|
||||
if (!env->vpa_addr) {
|
||||
return H_RESOURCE;
|
||||
}
|
||||
|
||||
env->slb_shadow = addr;
|
||||
env->slb_shadow_addr = addr;
|
||||
env->slb_shadow_size = size;
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
|
||||
{
|
||||
env->slb_shadow = 0;
|
||||
env->slb_shadow_addr = 0;
|
||||
env->slb_shadow_size = 0;
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -437,11 +439,11 @@ static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
|
|||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
if (!env->vpa) {
|
||||
if (!env->vpa_addr) {
|
||||
return H_RESOURCE;
|
||||
}
|
||||
|
||||
env->dispatch_trace_log = addr;
|
||||
env->dtl_addr = addr;
|
||||
env->dtl_size = size;
|
||||
|
||||
return H_SUCCESS;
|
||||
|
@ -449,7 +451,7 @@ static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
|
|||
|
||||
static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
|
||||
{
|
||||
env->dispatch_trace_log = 0;
|
||||
env->dtl_addr = 0;
|
||||
env->dtl_size = 0;
|
||||
|
||||
return H_SUCCESS;
|
||||
|
@ -670,11 +672,10 @@ void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
|
|||
} else {
|
||||
assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));
|
||||
|
||||
|
||||
slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
|
||||
}
|
||||
|
||||
assert(!(*slot) || (fn == *slot));
|
||||
assert(!(*slot));
|
||||
*slot = fn;
|
||||
}
|
||||
|
||||
|
|
|
@ -439,43 +439,6 @@ static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
|
|||
qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
|
||||
}
|
||||
|
||||
static uint64_t spapr_io_read(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
return cpu_inb(addr);
|
||||
case 2:
|
||||
return cpu_inw(addr);
|
||||
case 4:
|
||||
return cpu_inl(addr);
|
||||
}
|
||||
assert(0);
|
||||
}
|
||||
|
||||
static void spapr_io_write(void *opaque, hwaddr addr,
|
||||
uint64_t data, unsigned size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
cpu_outb(addr, data);
|
||||
return;
|
||||
case 2:
|
||||
cpu_outw(addr, data);
|
||||
return;
|
||||
case 4:
|
||||
cpu_outl(addr, data);
|
||||
return;
|
||||
}
|
||||
assert(0);
|
||||
}
|
||||
|
||||
static const MemoryRegionOps spapr_io_ops = {
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
.read = spapr_io_read,
|
||||
.write = spapr_io_write
|
||||
};
|
||||
|
||||
/*
|
||||
* MSI/MSIX memory region implementation.
|
||||
* The handler handles both MSI and MSIX.
|
||||
|
@ -545,14 +508,9 @@ static int spapr_phb_init(SysBusDevice *s)
|
|||
* old_portion are updated */
|
||||
sprintf(namebuf, "%s.io", sphb->dtbusname);
|
||||
memory_region_init(&sphb->iospace, namebuf, SPAPR_PCI_IO_WIN_SIZE);
|
||||
/* FIXME: fix to support multiple PHBs */
|
||||
memory_region_add_subregion(get_system_io(), 0, &sphb->iospace);
|
||||
|
||||
sprintf(namebuf, "%s.io-alias", sphb->dtbusname);
|
||||
memory_region_init_io(&sphb->iowindow, &spapr_io_ops, sphb,
|
||||
namebuf, SPAPR_PCI_IO_WIN_SIZE);
|
||||
memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
|
||||
&sphb->iowindow);
|
||||
&sphb->iospace);
|
||||
|
||||
/* As MSI/MSIX interrupts trigger by writing at MSI/MSIX vectors,
|
||||
* we need to allocate some memory to catch those writes coming
|
||||
|
|
|
@ -44,7 +44,7 @@ typedef struct sPAPRPHBState {
|
|||
MemoryRegion memspace, iospace;
|
||||
hwaddr mem_win_addr, mem_win_size, io_win_addr, io_win_size;
|
||||
hwaddr msi_win_addr;
|
||||
MemoryRegion memwindow, iowindow, msiwindow;
|
||||
MemoryRegion memwindow, msiwindow;
|
||||
|
||||
uint32_t dma_liobn;
|
||||
uint64_t dma_window_start;
|
||||
|
|
|
@ -241,6 +241,15 @@ target_ulong spapr_rtas_call(sPAPREnvironment *spapr,
|
|||
|
||||
void spapr_rtas_register(const char *name, spapr_rtas_fn fn)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < (rtas_next - rtas_table); i++) {
|
||||
if (strcmp(name, rtas_table[i].name) == 0) {
|
||||
fprintf(stderr, "RTAS call \"%s\" registered twice\n", name);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
assert(rtas_next < (rtas_table + TOKEN_MAX));
|
||||
|
||||
rtas_next->name = name;
|
||||
|
|
28
hw/usb.h
|
@ -38,12 +38,14 @@
|
|||
#define USB_TOKEN_IN 0x69 /* device -> host */
|
||||
#define USB_TOKEN_OUT 0xe1 /* host -> device */
|
||||
|
||||
#define USB_RET_NODEV (-1)
|
||||
#define USB_RET_NAK (-2)
|
||||
#define USB_RET_STALL (-3)
|
||||
#define USB_RET_BABBLE (-4)
|
||||
#define USB_RET_IOERROR (-5)
|
||||
#define USB_RET_ASYNC (-6)
|
||||
#define USB_RET_NODEV (-1)
|
||||
#define USB_RET_NAK (-2)
|
||||
#define USB_RET_STALL (-3)
|
||||
#define USB_RET_BABBLE (-4)
|
||||
#define USB_RET_IOERROR (-5)
|
||||
#define USB_RET_ASYNC (-6)
|
||||
#define USB_RET_ADD_TO_QUEUE (-7)
|
||||
#define USB_RET_REMOVE_FROM_QUEUE (-8)
|
||||
|
||||
#define USB_SPEED_LOW 0
|
||||
#define USB_SPEED_FULL 1
|
||||
|
@ -293,6 +295,12 @@ typedef struct USBDeviceClass {
|
|||
void (*set_interface)(USBDevice *dev, int interface,
|
||||
int alt_old, int alt_new);
|
||||
|
||||
/*
|
||||
* Called when the hcd is done queuing packets for an endpoint, only
|
||||
* necessary for devices which can return USB_RET_ADD_TO_QUEUE.
|
||||
*/
|
||||
void (*flush_ep_queue)(USBDevice *dev, USBEndpoint *ep);
|
||||
|
||||
const char *product_desc;
|
||||
const USBDesc *usb_desc;
|
||||
} USBDeviceClass;
|
||||
|
@ -343,6 +351,8 @@ struct USBPacket {
|
|||
USBEndpoint *ep;
|
||||
QEMUIOVector iov;
|
||||
uint64_t parameter; /* control transfers */
|
||||
bool short_not_ok;
|
||||
bool int_req;
|
||||
int result; /* transfer length or USB_RET_* status code */
|
||||
/* Internal use by the USB layer. */
|
||||
USBPacketState state;
|
||||
|
@ -352,7 +362,8 @@ struct USBPacket {
|
|||
void usb_packet_init(USBPacket *p);
|
||||
void usb_packet_set_state(USBPacket *p, USBPacketState state);
|
||||
void usb_packet_check_state(USBPacket *p, USBPacketState expected);
|
||||
void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep, uint64_t id);
|
||||
void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep, uint64_t id,
|
||||
bool short_not_ok, bool int_req);
|
||||
void usb_packet_addbuf(USBPacket *p, void *ptr, size_t len);
|
||||
int usb_packet_map(USBPacket *p, QEMUSGList *sgl);
|
||||
void usb_packet_unmap(USBPacket *p, QEMUSGList *sgl);
|
||||
|
@ -370,6 +381,7 @@ USBDevice *usb_find_device(USBPort *port, uint8_t addr);
|
|||
|
||||
int usb_handle_packet(USBDevice *dev, USBPacket *p);
|
||||
void usb_packet_complete(USBDevice *dev, USBPacket *p);
|
||||
void usb_packet_complete_one(USBDevice *dev, USBPacket *p);
|
||||
void usb_cancel_packet(USBPacket * p);
|
||||
|
||||
void usb_ep_init(USBDevice *dev);
|
||||
|
@ -506,6 +518,8 @@ int usb_device_handle_data(USBDevice *dev, USBPacket *p);
|
|||
void usb_device_set_interface(USBDevice *dev, int interface,
|
||||
int alt_old, int alt_new);
|
||||
|
||||
void usb_device_flush_ep_queue(USBDevice *dev, USBEndpoint *ep);
|
||||
|
||||
const char *usb_device_get_product_desc(USBDevice *dev);
|
||||
|
||||
const USBDesc *usb_device_get_usb_desc(USBDevice *dev);
|
||||
|
|
|
@ -181,6 +181,14 @@ void usb_device_set_interface(USBDevice *dev, int interface,
|
|||
}
|
||||
}
|
||||
|
||||
void usb_device_flush_ep_queue(USBDevice *dev, USBEndpoint *ep)
|
||||
{
|
||||
USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev);
|
||||
if (klass->flush_ep_queue) {
|
||||
klass->flush_ep_queue(dev, ep);
|
||||
}
|
||||
}
|
||||
|
||||
static int usb_qdev_init(DeviceState *qdev)
|
||||
{
|
||||
USBDevice *dev = USB_DEVICE(qdev);
|
||||
|
|
|
@ -391,8 +391,13 @@ int usb_handle_packet(USBDevice *dev, USBPacket *p)
|
|||
if (QTAILQ_EMPTY(&p->ep->queue) || p->ep->pipeline) {
|
||||
ret = usb_process_one(p);
|
||||
if (ret == USB_RET_ASYNC) {
|
||||
assert(p->ep->type != USB_ENDPOINT_XFER_ISOC);
|
||||
usb_packet_set_state(p, USB_PACKET_ASYNC);
|
||||
QTAILQ_INSERT_TAIL(&p->ep->queue, p, queue);
|
||||
} else if (ret == USB_RET_ADD_TO_QUEUE) {
|
||||
usb_packet_set_state(p, USB_PACKET_QUEUED);
|
||||
QTAILQ_INSERT_TAIL(&p->ep->queue, p, queue);
|
||||
ret = USB_RET_ASYNC;
|
||||
} else {
|
||||
/*
|
||||
* When pipelining is enabled usb-devices must always return async,
|
||||
|
@ -412,13 +417,14 @@ int usb_handle_packet(USBDevice *dev, USBPacket *p)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void __usb_packet_complete(USBDevice *dev, USBPacket *p)
|
||||
void usb_packet_complete_one(USBDevice *dev, USBPacket *p)
|
||||
{
|
||||
USBEndpoint *ep = p->ep;
|
||||
|
||||
assert(QTAILQ_FIRST(&ep->queue) == p);
|
||||
assert(p->result != USB_RET_ASYNC && p->result != USB_RET_NAK);
|
||||
|
||||
if (p->result < 0) {
|
||||
if (p->result < 0 || (p->short_not_ok && (p->result < p->iov.size))) {
|
||||
ep->halted = true;
|
||||
}
|
||||
usb_packet_set_state(p, USB_PACKET_COMPLETE);
|
||||
|
@ -435,11 +441,16 @@ void usb_packet_complete(USBDevice *dev, USBPacket *p)
|
|||
int ret;
|
||||
|
||||
usb_packet_check_state(p, USB_PACKET_ASYNC);
|
||||
assert(QTAILQ_FIRST(&ep->queue) == p);
|
||||
__usb_packet_complete(dev, p);
|
||||
usb_packet_complete_one(dev, p);
|
||||
|
||||
while (!ep->halted && !QTAILQ_EMPTY(&ep->queue)) {
|
||||
while (!QTAILQ_EMPTY(&ep->queue)) {
|
||||
p = QTAILQ_FIRST(&ep->queue);
|
||||
if (ep->halted) {
|
||||
/* Empty the queue on a halt */
|
||||
p->result = USB_RET_REMOVE_FROM_QUEUE;
|
||||
dev->port->ops->complete(dev->port, p);
|
||||
continue;
|
||||
}
|
||||
if (p->state == USB_PACKET_ASYNC) {
|
||||
break;
|
||||
}
|
||||
|
@ -450,7 +461,7 @@ void usb_packet_complete(USBDevice *dev, USBPacket *p)
|
|||
break;
|
||||
}
|
||||
p->result = ret;
|
||||
__usb_packet_complete(ep->dev, p);
|
||||
usb_packet_complete_one(ep->dev, p);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -522,7 +533,8 @@ void usb_packet_set_state(USBPacket *p, USBPacketState state)
|
|||
p->state = state;
|
||||
}
|
||||
|
||||
void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep, uint64_t id)
|
||||
void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep, uint64_t id,
|
||||
bool short_not_ok, bool int_req)
|
||||
{
|
||||
assert(!usb_packet_is_inflight(p));
|
||||
assert(p->iov.iov != NULL);
|
||||
|
@ -531,6 +543,8 @@ void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep, uint64_t id)
|
|||
p->ep = ep;
|
||||
p->result = 0;
|
||||
p->parameter = 0;
|
||||
p->short_not_ok = short_not_ok;
|
||||
p->int_req = int_req;
|
||||
qemu_iovec_reset(&p->iov);
|
||||
usb_packet_set_state(p, USB_PACKET_SETUP);
|
||||
}
|
||||
|
|
|
@ -362,7 +362,6 @@ struct EHCIPacket {
|
|||
USBPacket packet;
|
||||
QEMUSGList sgl;
|
||||
int pid;
|
||||
uint32_t tbytes;
|
||||
enum async_state async;
|
||||
int usb_status;
|
||||
};
|
||||
|
@ -382,7 +381,7 @@ struct EHCIQueue {
|
|||
uint32_t qhaddr; /* address QH read from */
|
||||
uint32_t qtdaddr; /* address QTD read from */
|
||||
USBDevice *dev;
|
||||
QTAILQ_HEAD(, EHCIPacket) packets;
|
||||
QTAILQ_HEAD(pkts_head, EHCIPacket) packets;
|
||||
};
|
||||
|
||||
typedef QTAILQ_HEAD(EHCIQueueHead, EHCIQueue) EHCIQueueHead;
|
||||
|
@ -444,6 +443,7 @@ struct EHCIState {
|
|||
|
||||
uint64_t last_run_ns;
|
||||
uint32_t async_stepdown;
|
||||
bool int_req_by_async;
|
||||
};
|
||||
|
||||
#define SET_LAST_RUN_CLOCK(s) \
|
||||
|
@ -488,6 +488,7 @@ static const char *ehci_mmio_names[] = {
|
|||
|
||||
static int ehci_state_executing(EHCIQueue *q);
|
||||
static int ehci_state_writeback(EHCIQueue *q);
|
||||
static int ehci_fill_queue(EHCIPacket *p);
|
||||
|
||||
static const char *nr2str(const char **n, size_t len, uint32_t nr)
|
||||
{
|
||||
|
@ -1245,7 +1246,7 @@ static void ehci_opreg_write(void *ptr, hwaddr addr,
|
|||
s->usbcmd = val; /* Set usbcmd for ehci_update_halt() */
|
||||
ehci_update_halt(s);
|
||||
s->async_stepdown = 0;
|
||||
qemu_mod_timer(s->frame_timer, qemu_get_clock_ns(vm_clock));
|
||||
qemu_bh_schedule(s->async_bh);
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1456,8 +1457,15 @@ static void ehci_async_complete_packet(USBPort *port, USBPacket *packet)
|
|||
}
|
||||
|
||||
p = container_of(packet, EHCIPacket, packet);
|
||||
trace_usb_ehci_packet_action(p->queue, p, "wakeup");
|
||||
assert(p->async == EHCI_ASYNC_INFLIGHT);
|
||||
|
||||
if (packet->result == USB_RET_REMOVE_FROM_QUEUE) {
|
||||
trace_usb_ehci_packet_action(p->queue, p, "remove");
|
||||
ehci_free_packet(p);
|
||||
return;
|
||||
}
|
||||
|
||||
trace_usb_ehci_packet_action(p->queue, p, "wakeup");
|
||||
p->async = EHCI_ASYNC_FINISHED;
|
||||
p->usb_status = packet->result;
|
||||
|
||||
|
@ -1505,15 +1513,20 @@ static void ehci_execute_complete(EHCIQueue *q)
|
|||
}
|
||||
} else {
|
||||
// TODO check 4.12 for splits
|
||||
uint32_t tbytes = get_field(q->qh.token, QTD_TOKEN_TBYTES);
|
||||
|
||||
if (p->tbytes && p->pid == USB_TOKEN_IN) {
|
||||
p->tbytes -= p->usb_status;
|
||||
if (tbytes && p->pid == USB_TOKEN_IN) {
|
||||
tbytes -= p->usb_status;
|
||||
if (tbytes) {
|
||||
/* 4.15.1.2 must raise int on a short input packet */
|
||||
ehci_raise_irq(q->ehci, USBSTS_INT);
|
||||
}
|
||||
} else {
|
||||
p->tbytes = 0;
|
||||
tbytes = 0;
|
||||
}
|
||||
|
||||
DPRINTF("updating tbytes to %d\n", p->tbytes);
|
||||
set_field(&q->qh.token, p->tbytes, QTD_TOKEN_TBYTES);
|
||||
DPRINTF("updating tbytes to %d\n", tbytes);
|
||||
set_field(&q->qh.token, tbytes, QTD_TOKEN_TBYTES);
|
||||
}
|
||||
ehci_finish_transfer(q, p->usb_status);
|
||||
usb_packet_unmap(&p->packet, &p->sgl);
|
||||
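As a worked example of the rewritten TBYTES handling (numbers chosen only for illustration): if the qTD asked for 512 bytes on an IN transfer and the device returned 200, tbytes becomes 512 - 200 = 312; the non-zero remainder marks a short packet, so USBSTS_INT is raised per EHCI 4.15.1.2 and 312 (the bytes not transferred) is written back into the token's TBYTES field.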
|
@ -1525,6 +1538,9 @@ static void ehci_execute_complete(EHCIQueue *q)
|
|||
|
||||
if (q->qh.token & QTD_TOKEN_IOC) {
|
||||
ehci_raise_irq(q->ehci, USBSTS_INT);
|
||||
if (q->async) {
|
||||
q->ehci->int_req_by_async = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1535,6 +1551,7 @@ static int ehci_execute(EHCIPacket *p, const char *action)
|
|||
USBEndpoint *ep;
|
||||
int ret;
|
||||
int endp;
|
||||
bool spd;
|
||||
|
||||
assert(p->async == EHCI_ASYNC_NONE ||
|
||||
p->async == EHCI_ASYNC_INITIALIZED);
|
||||
|
@ -1544,8 +1561,7 @@ static int ehci_execute(EHCIPacket *p, const char *action)
|
|||
return USB_RET_PROCERR;
|
||||
}
|
||||
|
||||
p->tbytes = (p->qtd.token & QTD_TOKEN_TBYTES_MASK) >> QTD_TOKEN_TBYTES_SH;
|
||||
if (p->tbytes > BUFF_SIZE) {
|
||||
if (get_field(p->qtd.token, QTD_TOKEN_TBYTES) > BUFF_SIZE) {
|
||||
ehci_trace_guest_bug(p->queue->ehci,
|
||||
"guest requested more bytes than allowed");
|
||||
return USB_RET_PROCERR;
|
||||
|
@ -1575,17 +1591,18 @@ static int ehci_execute(EHCIPacket *p, const char *action)
|
|||
return USB_RET_PROCERR;
|
||||
}
|
||||
|
||||
usb_packet_setup(&p->packet, p->pid, ep, p->qtdaddr);
|
||||
spd = (p->pid == USB_TOKEN_IN && NLPTR_TBIT(p->qtd.altnext) == 0);
|
||||
usb_packet_setup(&p->packet, p->pid, ep, p->qtdaddr, spd,
|
||||
(p->qtd.token & QTD_TOKEN_IOC) != 0);
|
||||
usb_packet_map(&p->packet, &p->sgl);
|
||||
p->async = EHCI_ASYNC_INITIALIZED;
|
||||
}
|
||||
|
||||
trace_usb_ehci_packet_action(p->queue, p, action);
|
||||
ret = usb_handle_packet(p->queue->dev, &p->packet);
|
||||
DPRINTF("submit: qh %x next %x qtd %x pid %x len %zd "
|
||||
"(total %d) endp %x ret %d\n",
|
||||
DPRINTF("submit: qh %x next %x qtd %x pid %x len %zd endp %x ret %d\n",
|
||||
q->qhaddr, q->qh.next, q->qtdaddr, q->pid,
|
||||
q->packet.iov.size, q->tbytes, endp, ret);
|
||||
q->packet.iov.size, endp, ret);
|
||||
|
||||
if (ret > BUFF_SIZE) {
|
||||
fprintf(stderr, "ret from usb_handle_packet > BUFF_SIZE\n");
|
||||
|
@ -1646,10 +1663,10 @@ static int ehci_process_itd(EHCIState *ehci,
|
|||
dev = ehci_find_device(ehci, devaddr);
|
||||
ep = usb_ep_get(dev, pid, endp);
|
||||
if (ep && ep->type == USB_ENDPOINT_XFER_ISOC) {
|
||||
usb_packet_setup(&ehci->ipacket, pid, ep, addr);
|
||||
usb_packet_setup(&ehci->ipacket, pid, ep, addr, false,
|
||||
(itd->transact[i] & ITD_XACT_IOC) != 0);
|
||||
usb_packet_map(&ehci->ipacket, &ehci->isgl);
|
||||
ret = usb_handle_packet(dev, &ehci->ipacket);
|
||||
assert(ret != USB_RET_ASYNC);
|
||||
usb_packet_unmap(&ehci->ipacket, &ehci->isgl);
|
||||
} else {
|
||||
DPRINTF("ISOCH: attempt to addess non-iso endpoint\n");
|
||||
|
@ -1988,7 +2005,7 @@ static int ehci_state_fetchqtd(EHCIQueue *q)
|
|||
{
|
||||
EHCIqtd qtd;
|
||||
EHCIPacket *p;
|
||||
int again = 0;
|
||||
int again = 1;
|
||||
|
||||
get_dwords(q->ehci, NLPTR_GET(q->qtdaddr), (uint32_t *) &qtd,
|
||||
sizeof(EHCIqtd) >> 2);
|
||||
|
@ -2016,7 +2033,6 @@ static int ehci_state_fetchqtd(EHCIQueue *q)
|
|||
p = NULL;
|
||||
}
|
||||
ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH);
|
||||
again = 1;
|
||||
} else if (p != NULL) {
|
||||
switch (p->async) {
|
||||
case EHCI_ASYNC_NONE:
|
||||
|
@ -2025,6 +2041,9 @@ static int ehci_state_fetchqtd(EHCIQueue *q)
|
|||
ehci_set_state(q->ehci, q->async, EST_EXECUTE);
|
||||
break;
|
||||
case EHCI_ASYNC_INFLIGHT:
|
||||
/* Check if the guest has added new tds to the queue */
|
||||
again = (ehci_fill_queue(QTAILQ_LAST(&q->packets, pkts_head)) ==
|
||||
USB_RET_PROCERR) ? -1 : 1;
|
||||
/* Unfinished async handled packet, go horizontal */
|
||||
ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH);
|
||||
break;
|
||||
|
@ -2036,13 +2055,11 @@ static int ehci_state_fetchqtd(EHCIQueue *q)
|
|||
ehci_set_state(q->ehci, q->async, EST_EXECUTING);
|
||||
break;
|
||||
}
|
||||
again = 1;
|
||||
} else {
|
||||
p = ehci_alloc_packet(q);
|
||||
p->qtdaddr = q->qtdaddr;
|
||||
p->qtd = qtd;
|
||||
ehci_set_state(q->ehci, q->async, EST_EXECUTE);
|
||||
again = 1;
|
||||
}
|
||||
|
||||
return again;
|
||||
|
@ -2065,18 +2082,23 @@ static int ehci_state_horizqh(EHCIQueue *q)
|
|||
|
||||
static int ehci_fill_queue(EHCIPacket *p)
|
||||
{
|
||||
USBEndpoint *ep = p->packet.ep;
|
||||
EHCIQueue *q = p->queue;
|
||||
EHCIqtd qtd = p->qtd;
|
||||
uint32_t qtdaddr;
|
||||
uint32_t qtdaddr, start_addr = p->qtdaddr;
|
||||
|
||||
for (;;) {
|
||||
if (NLPTR_TBIT(qtd.altnext) == 0) {
|
||||
break;
|
||||
}
|
||||
if (NLPTR_TBIT(qtd.next) != 0) {
|
||||
break;
|
||||
}
|
||||
qtdaddr = qtd.next;
|
||||
/*
|
||||
* Detect circular td lists; Windows creates these, counting on the
|
||||
* active bit going low after execution to make the queue stop.
|
||||
*/
|
||||
if (qtdaddr == start_addr) {
|
||||
break;
|
||||
}
|
||||
get_dwords(q->ehci, NLPTR_GET(qtdaddr),
|
||||
(uint32_t *) &qtd, sizeof(EHCIqtd) >> 2);
|
||||
ehci_trace_qtd(q, NLPTR_GET(qtdaddr), &qtd);
|
||||
|
@ -2093,6 +2115,9 @@ static int ehci_fill_queue(EHCIPacket *p)
|
|||
assert(p->usb_status == USB_RET_ASYNC);
|
||||
p->async = EHCI_ASYNC_INFLIGHT;
|
||||
}
|
||||
if (p->usb_status != USB_RET_PROCERR) {
|
||||
usb_device_flush_ep_queue(ep->dev, ep);
|
||||
}
|
||||
return p->usb_status;
|
||||
}
|
||||
|
||||
|
@ -2198,19 +2223,6 @@ static int ehci_state_writeback(EHCIQueue *q)
|
|||
* bit is clear.
|
||||
*/
|
||||
if (q->qh.token & QTD_TOKEN_HALT) {
|
||||
/*
|
||||
* We should not do any further processing on a halted queue!
|
||||
* This is esp. important for bulk endpoints with pipelining enabled
|
||||
* (redirection to a real USB device), where we must cancel all the
|
||||
* transfers after this one so that:
|
||||
* 1) If they've completed already, they are not processed further
|
||||
* causing more stalls, originating from the same failed transfer
|
||||
* 2) If still in flight, they are cancelled before the guest does
|
||||
* a clear stall, otherwise the guest and device can lose sync!
|
||||
*/
|
||||
while ((p = QTAILQ_FIRST(&q->packets)) != NULL) {
|
||||
ehci_free_packet(p);
|
||||
}
|
||||
ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH);
|
||||
again = 1;
|
||||
} else {
|
||||
|
@ -2502,18 +2514,19 @@ static void ehci_frame_timer(void *opaque)
|
|||
}
|
||||
|
||||
if (need_timer) {
|
||||
expire_time = t_now + (get_ticks_per_sec()
|
||||
/* If we've raised int, we speed up the timer, so that we quickly
|
||||
* notice any new packets queued up in response */
|
||||
if (ehci->int_req_by_async && (ehci->usbsts & USBSTS_INT)) {
|
||||
expire_time = t_now + get_ticks_per_sec() / (FRAME_TIMER_FREQ * 2);
|
||||
ehci->int_req_by_async = false;
|
||||
} else {
|
||||
expire_time = t_now + (get_ticks_per_sec()
|
||||
* (ehci->async_stepdown+1) / FRAME_TIMER_FREQ);
|
||||
}
|
||||
qemu_mod_timer(ehci->frame_timer, expire_time);
|
||||
}
|
||||
}
|
||||
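Roughly, and assuming the usual 1000 Hz EHCI frame rate for FRAME_TIMER_FREQ: the normal rearm interval is (async_stepdown + 1) / 1000 s, i.e. 1 ms with no stepdown and progressively longer as the controller idles, while the new fast path rearms at 1 / (2 * 1000) s = 0.5 ms right after an async packet raised an IOC interrupt, so follow-up packets queued by the guest are noticed quickly.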
|
||||
static void ehci_async_bh(void *opaque)
|
||||
{
|
||||
EHCIState *ehci = opaque;
|
||||
ehci_advance_async_state(ehci);
|
||||
}
|
||||
|
||||
static const MemoryRegionOps ehci_mmio_caps_ops = {
|
||||
.read = ehci_caps_read,
|
||||
.valid.min_access_size = 1,
|
||||
|
@ -2742,7 +2755,7 @@ static int usb_ehci_initfn(PCIDevice *dev)
|
|||
}
|
||||
|
||||
s->frame_timer = qemu_new_timer_ns(vm_clock, ehci_frame_timer, s);
|
||||
s->async_bh = qemu_bh_new(ehci_async_bh, s);
|
||||
s->async_bh = qemu_bh_new(ehci_frame_timer, s);
|
||||
QTAILQ_INIT(&s->aqueues);
|
||||
QTAILQ_INIT(&s->pqueues);
|
||||
usb_packet_init(&s->ipacket);
|
||||
|
|
|
@ -627,7 +627,7 @@ static void musb_packet(MUSBState *s, MUSBEndPoint *ep,
|
|||
dev = usb_find_device(&s->port, ep->faddr[idx]);
|
||||
uep = usb_ep_get(dev, pid, ep->type[idx] & 0xf);
|
||||
usb_packet_setup(&ep->packey[dir].p, pid, uep,
|
||||
(dev->addr << 16) | (uep->nr << 8) | pid);
|
||||
(dev->addr << 16) | (uep->nr << 8) | pid, false, true);
|
||||
usb_packet_addbuf(&ep->packey[dir].p, ep->buf[idx], len);
|
||||
ep->packey[dir].ep = ep;
|
||||
ep->packey[dir].dir = dir;
|
||||
|
@ -635,6 +635,7 @@ static void musb_packet(MUSBState *s, MUSBEndPoint *ep,
|
|||
ret = usb_handle_packet(dev, &ep->packey[dir].p);
|
||||
|
||||
if (ret == USB_RET_ASYNC) {
|
||||
usb_device_flush_ep_queue(dev, uep);
|
||||
ep->status[dir] = len;
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -810,12 +810,15 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
|
|||
if (completion) {
|
||||
ret = ohci->usb_packet.result;
|
||||
} else {
|
||||
bool int_req = relative_frame_number == frame_count &&
|
||||
OHCI_BM(iso_td.flags, TD_DI) == 0;
|
||||
dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA));
|
||||
ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN));
|
||||
usb_packet_setup(&ohci->usb_packet, pid, ep, addr);
|
||||
usb_packet_setup(&ohci->usb_packet, pid, ep, addr, false, int_req);
|
||||
usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, len);
|
||||
ret = usb_handle_packet(dev, &ohci->usb_packet);
|
||||
if (ret == USB_RET_ASYNC) {
|
||||
usb_device_flush_ep_queue(dev, ep);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@ -1011,13 +1014,15 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
|
|||
}
|
||||
dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA));
|
||||
ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN));
|
||||
usb_packet_setup(&ohci->usb_packet, pid, ep, addr);
|
||||
usb_packet_setup(&ohci->usb_packet, pid, ep, addr, !flag_r,
|
||||
OHCI_BM(td.flags, TD_DI) == 0);
|
||||
usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, pktlen);
|
||||
ret = usb_handle_packet(dev, &ohci->usb_packet);
|
||||
#ifdef DEBUG_PACKET
|
||||
DPRINTF("ret=%d\n", ret);
|
||||
#endif
|
||||
if (ret == USB_RET_ASYNC) {
|
||||
usb_device_flush_ep_queue(dev, ep);
|
||||
ohci->async_td = addr;
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -100,16 +100,17 @@ struct UHCIAsync {
|
|||
QEMUSGList sgl;
|
||||
UHCIQueue *queue;
|
||||
QTAILQ_ENTRY(UHCIAsync) next;
|
||||
uint32_t td;
|
||||
uint8_t isoc;
|
||||
uint32_t td_addr;
|
||||
uint8_t done;
|
||||
};
|
||||
|
||||
struct UHCIQueue {
|
||||
uint32_t qh_addr;
|
||||
uint32_t token;
|
||||
UHCIState *uhci;
|
||||
USBEndpoint *ep;
|
||||
QTAILQ_ENTRY(UHCIQueue) next;
|
||||
QTAILQ_HEAD(, UHCIAsync) asyncs;
|
||||
QTAILQ_HEAD(asyncs_head, UHCIAsync) asyncs;
|
||||
int8_t valid;
|
||||
};
|
||||
|
||||
|
@ -161,13 +162,55 @@ typedef struct UHCI_QH {
|
|||
uint32_t el_link;
|
||||
} UHCI_QH;
|
||||
|
||||
static void uhci_async_cancel(UHCIAsync *async);
|
||||
static void uhci_queue_fill(UHCIQueue *q, UHCI_TD *td);
|
||||
|
||||
static inline int32_t uhci_queue_token(UHCI_TD *td)
|
||||
{
|
||||
/* covers ep, dev, pid -> identifies the endpoint */
|
||||
return td->token & 0x7ffff;
|
||||
if ((td->token & (0xf << 15)) == 0) {
|
||||
/* ctrl ep, covers ep and dev, not pid! */
|
||||
return td->token & 0x7ff00;
|
||||
} else {
|
||||
/* covers ep, dev, pid -> identifies the endpoint */
|
||||
return td->token & 0x7ffff;
|
||||
}
|
||||
}
|
||||
|
||||
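To make the masks above concrete, a sketch of the standard UHCI TD token layout as it is used elsewhere in this patch (the decomposition is illustrative, not new code):

    /* UHCI TD token: bits 0-7 PID, 8-14 device address, 15-18 endpoint,
     * bit 19 data toggle, bits 21-31 maximum length */
    uint8_t pid  = td->token & 0xff;
    uint8_t dev  = (td->token >> 8) & 0x7f;
    uint8_t endp = (td->token >> 15) & 0xf;
    /* 0x7ffff keeps pid+dev+endp; 0x7ff00 additionally drops the pid, so the
     * SETUP, IN and OUT stages of a control transfer share one queue */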
static UHCIQueue *uhci_queue_get(UHCIState *s, UHCI_TD *td)
|
||||
static UHCIQueue *uhci_queue_new(UHCIState *s, uint32_t qh_addr, UHCI_TD *td,
|
||||
USBEndpoint *ep)
|
||||
{
|
||||
UHCIQueue *queue;
|
||||
|
||||
queue = g_new0(UHCIQueue, 1);
|
||||
queue->uhci = s;
|
||||
queue->qh_addr = qh_addr;
|
||||
queue->token = uhci_queue_token(td);
|
||||
queue->ep = ep;
|
||||
QTAILQ_INIT(&queue->asyncs);
|
||||
QTAILQ_INSERT_HEAD(&s->queues, queue, next);
|
||||
/* valid needs to be large enough to handle 10 frame delay
|
||||
* for initial isochronous requests */
|
||||
queue->valid = 32;
|
||||
trace_usb_uhci_queue_add(queue->token);
|
||||
return queue;
|
||||
}
|
||||
|
||||
static void uhci_queue_free(UHCIQueue *queue, const char *reason)
|
||||
{
|
||||
UHCIState *s = queue->uhci;
|
||||
UHCIAsync *async;
|
||||
|
||||
while (!QTAILQ_EMPTY(&queue->asyncs)) {
|
||||
async = QTAILQ_FIRST(&queue->asyncs);
|
||||
uhci_async_cancel(async);
|
||||
}
|
||||
|
||||
trace_usb_uhci_queue_del(queue->token, reason);
|
||||
QTAILQ_REMOVE(&s->queues, queue, next);
|
||||
g_free(queue);
|
||||
}
|
||||
|
||||
static UHCIQueue *uhci_queue_find(UHCIState *s, UHCI_TD *td)
|
||||
{
|
||||
uint32_t token = uhci_queue_token(td);
|
||||
UHCIQueue *queue;
|
||||
|
@ -177,41 +220,36 @@ static UHCIQueue *uhci_queue_get(UHCIState *s, UHCI_TD *td)
|
|||
return queue;
|
||||
}
|
||||
}
|
||||
|
||||
queue = g_new0(UHCIQueue, 1);
|
||||
queue->uhci = s;
|
||||
queue->token = token;
|
||||
QTAILQ_INIT(&queue->asyncs);
|
||||
QTAILQ_INSERT_HEAD(&s->queues, queue, next);
|
||||
trace_usb_uhci_queue_add(queue->token);
|
||||
return queue;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void uhci_queue_free(UHCIQueue *queue)
|
||||
static bool uhci_queue_verify(UHCIQueue *queue, uint32_t qh_addr, UHCI_TD *td,
|
||||
uint32_t td_addr, bool queuing)
|
||||
{
|
||||
UHCIState *s = queue->uhci;
|
||||
UHCIAsync *first = QTAILQ_FIRST(&queue->asyncs);
|
||||
|
||||
trace_usb_uhci_queue_del(queue->token);
|
||||
QTAILQ_REMOVE(&s->queues, queue, next);
|
||||
g_free(queue);
|
||||
return queue->qh_addr == qh_addr &&
|
||||
queue->token == uhci_queue_token(td) &&
|
||||
(queuing || !(td->ctrl & TD_CTRL_ACTIVE) || first == NULL ||
|
||||
first->td_addr == td_addr);
|
||||
}
|
||||
|
||||
static UHCIAsync *uhci_async_alloc(UHCIQueue *queue, uint32_t addr)
|
||||
static UHCIAsync *uhci_async_alloc(UHCIQueue *queue, uint32_t td_addr)
|
||||
{
|
||||
UHCIAsync *async = g_new0(UHCIAsync, 1);
|
||||
|
||||
async->queue = queue;
|
||||
async->td = addr;
|
||||
async->td_addr = td_addr;
|
||||
usb_packet_init(&async->packet);
|
||||
pci_dma_sglist_init(&async->sgl, &queue->uhci->dev, 1);
|
||||
trace_usb_uhci_packet_add(async->queue->token, async->td);
|
||||
trace_usb_uhci_packet_add(async->queue->token, async->td_addr);
|
||||
|
||||
return async;
|
||||
}
|
||||
|
||||
static void uhci_async_free(UHCIAsync *async)
|
||||
{
|
||||
trace_usb_uhci_packet_del(async->queue->token, async->td);
|
||||
trace_usb_uhci_packet_del(async->queue->token, async->td_addr);
|
||||
usb_packet_cleanup(&async->packet);
|
||||
qemu_sglist_destroy(&async->sgl);
|
||||
g_free(async);
|
||||
|
@ -221,21 +259,24 @@ static void uhci_async_link(UHCIAsync *async)
|
|||
{
|
||||
UHCIQueue *queue = async->queue;
|
||||
QTAILQ_INSERT_TAIL(&queue->asyncs, async, next);
|
||||
trace_usb_uhci_packet_link_async(async->queue->token, async->td);
|
||||
trace_usb_uhci_packet_link_async(async->queue->token, async->td_addr);
|
||||
}
|
||||
|
||||
static void uhci_async_unlink(UHCIAsync *async)
|
||||
{
|
||||
UHCIQueue *queue = async->queue;
|
||||
QTAILQ_REMOVE(&queue->asyncs, async, next);
|
||||
trace_usb_uhci_packet_unlink_async(async->queue->token, async->td);
|
||||
trace_usb_uhci_packet_unlink_async(async->queue->token, async->td_addr);
|
||||
}
|
||||
|
||||
static void uhci_async_cancel(UHCIAsync *async)
|
||||
{
|
||||
trace_usb_uhci_packet_cancel(async->queue->token, async->td, async->done);
|
||||
uhci_async_unlink(async);
|
||||
trace_usb_uhci_packet_cancel(async->queue->token, async->td_addr,
|
||||
async->done);
|
||||
if (!async->done)
|
||||
usb_cancel_packet(&async->packet);
|
||||
usb_packet_unmap(&async->packet, &async->sgl);
|
||||
uhci_async_free(async);
|
||||
}
|
||||
|
||||
|
@ -258,34 +299,21 @@ static void uhci_async_validate_begin(UHCIState *s)
|
|||
static void uhci_async_validate_end(UHCIState *s)
|
||||
{
|
||||
UHCIQueue *queue, *n;
|
||||
UHCIAsync *async;
|
||||
|
||||
QTAILQ_FOREACH_SAFE(queue, &s->queues, next, n) {
|
||||
if (queue->valid > 0) {
|
||||
continue;
|
||||
if (!queue->valid) {
|
||||
uhci_queue_free(queue, "validate-end");
|
||||
}
|
||||
while (!QTAILQ_EMPTY(&queue->asyncs)) {
|
||||
async = QTAILQ_FIRST(&queue->asyncs);
|
||||
uhci_async_unlink(async);
|
||||
uhci_async_cancel(async);
|
||||
}
|
||||
uhci_queue_free(queue);
|
||||
}
|
||||
}
|
||||
|
||||
static void uhci_async_cancel_device(UHCIState *s, USBDevice *dev)
|
||||
{
|
||||
UHCIQueue *queue;
|
||||
UHCIAsync *curr, *n;
|
||||
UHCIQueue *queue, *n;
|
||||
|
||||
QTAILQ_FOREACH(queue, &s->queues, next) {
|
||||
QTAILQ_FOREACH_SAFE(curr, &queue->asyncs, next, n) {
|
||||
if (!usb_packet_is_inflight(&curr->packet) ||
|
||||
curr->packet.ep->dev != dev) {
|
||||
continue;
|
||||
}
|
||||
uhci_async_unlink(curr);
|
||||
uhci_async_cancel(curr);
|
||||
QTAILQ_FOREACH_SAFE(queue, &s->queues, next, n) {
|
||||
if (queue->ep->dev == dev) {
|
||||
uhci_queue_free(queue, "cancel-device");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -293,38 +321,24 @@ static void uhci_async_cancel_device(UHCIState *s, USBDevice *dev)
|
|||
static void uhci_async_cancel_all(UHCIState *s)
|
||||
{
|
||||
UHCIQueue *queue, *nq;
|
||||
UHCIAsync *curr, *n;
|
||||
|
||||
QTAILQ_FOREACH_SAFE(queue, &s->queues, next, nq) {
|
||||
QTAILQ_FOREACH_SAFE(curr, &queue->asyncs, next, n) {
|
||||
uhci_async_unlink(curr);
|
||||
uhci_async_cancel(curr);
|
||||
}
|
||||
uhci_queue_free(queue);
|
||||
uhci_queue_free(queue, "cancel-all");
|
||||
}
|
||||
}
|
||||
|
||||
static UHCIAsync *uhci_async_find_td(UHCIState *s, uint32_t addr, UHCI_TD *td)
|
||||
static UHCIAsync *uhci_async_find_td(UHCIState *s, uint32_t td_addr)
|
||||
{
|
||||
uint32_t token = uhci_queue_token(td);
|
||||
UHCIQueue *queue;
|
||||
UHCIAsync *async;
|
||||
|
||||
QTAILQ_FOREACH(queue, &s->queues, next) {
|
||||
if (queue->token == token) {
|
||||
break;
|
||||
QTAILQ_FOREACH(async, &queue->asyncs, next) {
|
||||
if (async->td_addr == td_addr) {
|
||||
return async;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (queue == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
QTAILQ_FOREACH(async, &queue->asyncs, next) {
|
||||
if (async->td == addr) {
|
||||
return async;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -695,13 +709,15 @@ static USBDevice *uhci_find_device(UHCIState *s, uint8_t addr)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void uhci_async_complete(USBPort *port, USBPacket *packet);
|
||||
static void uhci_process_frame(UHCIState *s);
|
||||
static void uhci_read_td(UHCIState *s, UHCI_TD *td, uint32_t link)
|
||||
{
|
||||
pci_dma_read(&s->dev, link & ~0xf, td, sizeof(*td));
|
||||
le32_to_cpus(&td->link);
|
||||
le32_to_cpus(&td->ctrl);
|
||||
le32_to_cpus(&td->token);
|
||||
le32_to_cpus(&td->buffer);
|
||||
}
|
||||
|
||||
/* return -1 if fatal error (frame must be stopped)
|
||||
0 if TD successful
|
||||
1 if TD unsuccessful or inactive
|
||||
*/
|
||||
static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_t *int_mask)
|
||||
{
|
||||
int len = 0, max_len, err, ret;
|
||||
|
@ -733,100 +749,94 @@ static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_
|
|||
*int_mask |= 0x02;
|
||||
/* short packet: do not update QH */
|
||||
trace_usb_uhci_packet_complete_shortxfer(async->queue->token,
|
||||
async->td);
|
||||
async->td_addr);
|
||||
return TD_RESULT_NEXT_QH;
|
||||
}
|
||||
}
|
||||
|
||||
/* success */
|
||||
trace_usb_uhci_packet_complete_success(async->queue->token, async->td);
|
||||
trace_usb_uhci_packet_complete_success(async->queue->token,
|
||||
async->td_addr);
|
||||
return TD_RESULT_COMPLETE;
|
||||
|
||||
out:
|
||||
/*
|
||||
* We should not do any further processing on a queue with errors!
|
||||
* This is esp. important for bulk endpoints with pipelining enabled
|
||||
* (redirection to a real USB device), where we must cancel all the
|
||||
* transfers after this one so that:
|
||||
* 1) If they've completed already, they are not processed further
|
||||
* causing more stalls, originating from the same failed transfer
|
||||
* 2) If still in flight, they are cancelled before the guest does
|
||||
* a clear stall, otherwise the guest and device can lose sync!
|
||||
*/
|
||||
while (!QTAILQ_EMPTY(&async->queue->asyncs)) {
|
||||
UHCIAsync *as = QTAILQ_FIRST(&async->queue->asyncs);
|
||||
uhci_async_unlink(as);
|
||||
uhci_async_cancel(as);
|
||||
}
|
||||
|
||||
switch(ret) {
|
||||
case USB_RET_NAK:
|
||||
td->ctrl |= TD_CTRL_NAK;
|
||||
return TD_RESULT_NEXT_QH;
|
||||
|
||||
case USB_RET_STALL:
|
||||
td->ctrl |= TD_CTRL_STALL;
|
||||
td->ctrl &= ~TD_CTRL_ACTIVE;
|
||||
s->status |= UHCI_STS_USBERR;
|
||||
if (td->ctrl & TD_CTRL_IOC) {
|
||||
*int_mask |= 0x01;
|
||||
}
|
||||
uhci_update_irq(s);
|
||||
trace_usb_uhci_packet_complete_stall(async->queue->token, async->td);
|
||||
return TD_RESULT_NEXT_QH;
|
||||
trace_usb_uhci_packet_complete_stall(async->queue->token,
|
||||
async->td_addr);
|
||||
err = TD_RESULT_NEXT_QH;
|
||||
break;
|
||||
|
||||
case USB_RET_BABBLE:
|
||||
td->ctrl |= TD_CTRL_BABBLE | TD_CTRL_STALL;
|
||||
td->ctrl &= ~TD_CTRL_ACTIVE;
|
||||
s->status |= UHCI_STS_USBERR;
|
||||
if (td->ctrl & TD_CTRL_IOC) {
|
||||
*int_mask |= 0x01;
|
||||
}
|
||||
uhci_update_irq(s);
|
||||
/* frame interrupted */
|
||||
trace_usb_uhci_packet_complete_babble(async->queue->token, async->td);
|
||||
return TD_RESULT_STOP_FRAME;
|
||||
|
||||
case USB_RET_NAK:
|
||||
td->ctrl |= TD_CTRL_NAK;
|
||||
if (pid == USB_TOKEN_SETUP)
|
||||
break;
|
||||
return TD_RESULT_NEXT_QH;
|
||||
trace_usb_uhci_packet_complete_babble(async->queue->token,
|
||||
async->td_addr);
|
||||
err = TD_RESULT_STOP_FRAME;
|
||||
break;
|
||||
|
||||
case USB_RET_IOERROR:
|
||||
case USB_RET_NODEV:
|
||||
default:
|
||||
break;
|
||||
td->ctrl |= TD_CTRL_TIMEOUT;
|
||||
td->ctrl &= ~(3 << TD_CTRL_ERROR_SHIFT);
|
||||
trace_usb_uhci_packet_complete_error(async->queue->token,
|
||||
async->td_addr);
|
||||
err = TD_RESULT_NEXT_QH;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Retry the TD if error count is not zero */
|
||||
|
||||
td->ctrl |= TD_CTRL_TIMEOUT;
|
||||
err = (td->ctrl >> TD_CTRL_ERROR_SHIFT) & 3;
|
||||
if (err != 0) {
|
||||
err--;
|
||||
if (err == 0) {
|
||||
td->ctrl &= ~TD_CTRL_ACTIVE;
|
||||
s->status |= UHCI_STS_USBERR;
|
||||
if (td->ctrl & TD_CTRL_IOC)
|
||||
*int_mask |= 0x01;
|
||||
uhci_update_irq(s);
|
||||
trace_usb_uhci_packet_complete_error(async->queue->token,
|
||||
async->td);
|
||||
}
|
||||
td->ctrl &= ~TD_CTRL_ACTIVE;
|
||||
s->status |= UHCI_STS_USBERR;
|
||||
if (td->ctrl & TD_CTRL_IOC) {
|
||||
*int_mask |= 0x01;
|
||||
}
|
||||
td->ctrl = (td->ctrl & ~(3 << TD_CTRL_ERROR_SHIFT)) |
|
||||
(err << TD_CTRL_ERROR_SHIFT);
|
||||
return TD_RESULT_NEXT_QH;
|
||||
uhci_update_irq(s);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int uhci_handle_td(UHCIState *s, uint32_t addr, UHCI_TD *td,
|
||||
uint32_t *int_mask, bool queuing)
|
||||
static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr,
|
||||
UHCI_TD *td, uint32_t td_addr, uint32_t *int_mask)
|
||||
{
|
||||
UHCIAsync *async;
|
||||
int len = 0, max_len;
|
||||
uint8_t pid;
|
||||
USBDevice *dev;
|
||||
USBEndpoint *ep;
|
||||
bool spd;
|
||||
bool queuing = (q != NULL);
|
||||
uint8_t pid = td->token & 0xff;
|
||||
UHCIAsync *async = uhci_async_find_td(s, td_addr);
|
||||
|
||||
if (async) {
|
||||
if (uhci_queue_verify(async->queue, qh_addr, td, td_addr, queuing)) {
|
||||
assert(q == NULL || q == async->queue);
|
||||
q = async->queue;
|
||||
} else {
|
||||
uhci_queue_free(async->queue, "guest re-used pending td");
|
||||
async = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (q == NULL) {
|
||||
q = uhci_queue_find(s, td);
|
||||
if (q && !uhci_queue_verify(q, qh_addr, td, td_addr, queuing)) {
|
||||
uhci_queue_free(q, "guest re-used qh");
|
||||
q = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (q) {
|
||||
q->valid = 32;
|
||||
}
|
||||
|
||||
/* Is active ? */
|
||||
if (!(td->ctrl & TD_CTRL_ACTIVE)) {
|
||||
if (async) {
|
||||
/* Guest marked a pending td non-active, cancel the queue */
|
||||
uhci_queue_free(async->queue, "pending td non-active");
|
||||
}
|
||||
/*
|
||||
* uhci11d spec page 22: "Even if the Active bit in the TD is already
|
||||
* cleared when the TD is fetched ... an IOC interrupt is generated"
|
||||
|
@ -837,56 +847,60 @@ static int uhci_handle_td(UHCIState *s, uint32_t addr, UHCI_TD *td,
|
|||
return TD_RESULT_NEXT_QH;
|
||||
}
|
||||
|
||||
async = uhci_async_find_td(s, addr, td);
|
||||
if (async) {
|
||||
/* Already submitted */
|
||||
async->queue->valid = 32;
|
||||
|
||||
if (!async->done)
|
||||
return TD_RESULT_ASYNC_CONT;
|
||||
if (queuing) {
|
||||
/* we are busy filling the queue and are not prepared
|
||||
to consume completed packets yet; just leave them
|
||||
in async state */
|
||||
return TD_RESULT_ASYNC_CONT;
|
||||
}
|
||||
if (!async->done) {
|
||||
UHCI_TD last_td;
|
||||
UHCIAsync *last = QTAILQ_LAST(&async->queue->asyncs, asyncs_head);
|
||||
/*
|
||||
* While we are waiting for the current td to complete, the guest
|
||||
* may have added more tds to the queue. Note we re-read the td
|
||||
* rather than caching it, as we want to see guest-made changes!
|
||||
*/
|
||||
uhci_read_td(s, &last_td, last->td_addr);
|
||||
uhci_queue_fill(async->queue, &last_td);
|
||||
|
||||
return TD_RESULT_ASYNC_CONT;
|
||||
}
|
||||
uhci_async_unlink(async);
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Allocate new packet */
|
||||
async = uhci_async_alloc(uhci_queue_get(s, td), addr);
|
||||
|
||||
/* valid needs to be large enough to handle 10 frame delay
|
||||
* for initial isochronous requests
|
||||
*/
|
||||
async->queue->valid = 32;
|
||||
async->isoc = td->ctrl & TD_CTRL_IOS;
|
||||
if (q == NULL) {
|
||||
USBDevice *dev = uhci_find_device(s, (td->token >> 8) & 0x7f);
|
||||
USBEndpoint *ep = usb_ep_get(dev, pid, (td->token >> 15) & 0xf);
|
||||
q = uhci_queue_new(s, qh_addr, td, ep);
|
||||
}
|
||||
async = uhci_async_alloc(q, td_addr);
|
||||
|
||||
max_len = ((td->token >> 21) + 1) & 0x7ff;
|
||||
pid = td->token & 0xff;
|
||||
|
||||
dev = uhci_find_device(s, (td->token >> 8) & 0x7f);
|
||||
ep = usb_ep_get(dev, pid, (td->token >> 15) & 0xf);
|
||||
usb_packet_setup(&async->packet, pid, ep, addr);
|
||||
spd = (pid == USB_TOKEN_IN && (td->ctrl & TD_CTRL_SPD) != 0);
|
||||
usb_packet_setup(&async->packet, pid, q->ep, td_addr, spd,
|
||||
(td->ctrl & TD_CTRL_IOC) != 0);
|
||||
qemu_sglist_add(&async->sgl, td->buffer, max_len);
|
||||
usb_packet_map(&async->packet, &async->sgl);
|
||||
|
||||
switch(pid) {
|
||||
case USB_TOKEN_OUT:
|
||||
case USB_TOKEN_SETUP:
|
||||
len = usb_handle_packet(dev, &async->packet);
|
||||
len = usb_handle_packet(q->ep->dev, &async->packet);
|
||||
if (len >= 0)
|
||||
len = max_len;
|
||||
break;
|
||||
|
||||
case USB_TOKEN_IN:
|
||||
len = usb_handle_packet(dev, &async->packet);
|
||||
len = usb_handle_packet(q->ep->dev, &async->packet);
|
||||
break;
|
||||
|
||||
default:
|
||||
/* invalid pid : frame interrupted */
|
||||
usb_packet_unmap(&async->packet, &async->sgl);
|
||||
uhci_async_free(async);
|
||||
s->status |= UHCI_STS_HCPERR;
|
||||
uhci_update_irq(s);
|
||||
|
@ -895,6 +909,9 @@ static int uhci_handle_td(UHCIState *s, uint32_t addr, UHCI_TD *td,
|
|||
|
||||
if (len == USB_RET_ASYNC) {
|
||||
uhci_async_link(async);
|
||||
if (!queuing) {
|
||||
uhci_queue_fill(q, td);
|
||||
}
|
||||
return TD_RESULT_ASYNC_START;
|
||||
}
|
||||
|
||||
|
@ -912,30 +929,15 @@ static void uhci_async_complete(USBPort *port, USBPacket *packet)
|
|||
UHCIAsync *async = container_of(packet, UHCIAsync, packet);
|
||||
UHCIState *s = async->queue->uhci;
|
||||
|
||||
if (async->isoc) {
|
||||
UHCI_TD td;
|
||||
uint32_t link = async->td;
|
||||
uint32_t int_mask = 0, val;
|
||||
|
||||
pci_dma_read(&s->dev, link & ~0xf, &td, sizeof(td));
|
||||
le32_to_cpus(&td.link);
|
||||
le32_to_cpus(&td.ctrl);
|
||||
le32_to_cpus(&td.token);
|
||||
le32_to_cpus(&td.buffer);
|
||||
|
||||
if (packet->result == USB_RET_REMOVE_FROM_QUEUE) {
|
||||
uhci_async_unlink(async);
|
||||
uhci_complete_td(s, &td, async, &int_mask);
|
||||
s->pending_int_mask |= int_mask;
|
||||
uhci_async_cancel(async);
|
||||
return;
|
||||
}
|
||||
|
||||
/* update the status bits of the TD */
|
||||
val = cpu_to_le32(td.ctrl);
|
||||
pci_dma_write(&s->dev, (link & ~0xf) + 4, &val, sizeof(val));
|
||||
uhci_async_free(async);
|
||||
} else {
|
||||
async->done = 1;
|
||||
if (s->frame_bytes < s->frame_bandwidth) {
|
||||
qemu_bh_schedule(s->bh);
|
||||
}
|
||||
async->done = 1;
|
||||
if (s->frame_bytes < s->frame_bandwidth) {
|
||||
qemu_bh_schedule(s->bh);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -981,38 +983,31 @@ static int qhdb_insert(QhDb *db, uint32_t addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void uhci_fill_queue(UHCIState *s, UHCI_TD *td)
|
||||
static void uhci_queue_fill(UHCIQueue *q, UHCI_TD *td)
|
||||
{
|
||||
uint32_t int_mask = 0;
|
||||
uint32_t plink = td->link;
|
||||
uint32_t token = uhci_queue_token(td);
|
||||
UHCI_TD ptd;
|
||||
int ret;
|
||||
|
||||
while (is_valid(plink)) {
|
||||
pci_dma_read(&s->dev, plink & ~0xf, &ptd, sizeof(ptd));
|
||||
le32_to_cpus(&ptd.link);
|
||||
le32_to_cpus(&ptd.ctrl);
|
||||
le32_to_cpus(&ptd.token);
|
||||
le32_to_cpus(&ptd.buffer);
|
||||
uhci_read_td(q->uhci, &ptd, plink);
|
||||
if (!(ptd.ctrl & TD_CTRL_ACTIVE)) {
|
||||
break;
|
||||
}
|
||||
if (uhci_queue_token(&ptd) != token) {
|
||||
if (uhci_queue_token(&ptd) != q->token) {
|
||||
break;
|
||||
}
|
||||
trace_usb_uhci_td_queue(plink & ~0xf, ptd.ctrl, ptd.token);
|
||||
ret = uhci_handle_td(s, plink, &ptd, &int_mask, true);
|
||||
ret = uhci_handle_td(q->uhci, q, q->qh_addr, &ptd, plink, &int_mask);
|
||||
if (ret == TD_RESULT_ASYNC_CONT) {
|
||||
break;
|
||||
}
|
||||
assert(ret == TD_RESULT_ASYNC_START);
|
||||
assert(int_mask == 0);
|
||||
if (ptd.ctrl & TD_CTRL_SPD) {
|
||||
break;
|
||||
}
|
||||
plink = ptd.link;
|
||||
}
|
||||
usb_device_flush_ep_queue(q->ep->dev, q->ep);
|
||||
}
|
||||
|
||||
static void uhci_process_frame(UHCIState *s)
|
||||
|
@ -1081,15 +1076,11 @@ static void uhci_process_frame(UHCIState *s)
|
|||
}
|
||||
|
||||
/* TD */
|
||||
pci_dma_read(&s->dev, link & ~0xf, &td, sizeof(td));
|
||||
le32_to_cpus(&td.link);
|
||||
le32_to_cpus(&td.ctrl);
|
||||
le32_to_cpus(&td.token);
|
||||
le32_to_cpus(&td.buffer);
|
||||
uhci_read_td(s, &td, link);
|
||||
trace_usb_uhci_td_load(curr_qh & ~0xf, link & ~0xf, td.ctrl, td.token);
|
||||
|
||||
old_td_ctrl = td.ctrl;
|
||||
ret = uhci_handle_td(s, link, &td, &int_mask, false);
|
||||
ret = uhci_handle_td(s, NULL, curr_qh, &td, link, &int_mask);
|
||||
if (old_td_ctrl != td.ctrl) {
|
||||
/* update the status bits of the TD */
|
||||
val = cpu_to_le32(td.ctrl);
|
||||
|
@ -1108,9 +1099,6 @@ static void uhci_process_frame(UHCIState *s)
|
|||
|
||||
case TD_RESULT_ASYNC_START:
|
||||
trace_usb_uhci_td_async(curr_qh & ~0xf, link & ~0xf);
|
||||
if (is_valid(td.link) && !(td.ctrl & TD_CTRL_SPD)) {
|
||||
uhci_fill_queue(s, &td);
|
||||
}
|
||||
link = curr_qh ? qh.link : td.link;
|
||||
continue;
|
||||
|
||||
|
|
|
@ -322,6 +322,7 @@ typedef struct XHCITransfer {
|
|||
bool running_retry;
|
||||
bool cancelled;
|
||||
bool complete;
|
||||
bool int_req;
|
||||
unsigned int iso_pkts;
|
||||
unsigned int slotid;
|
||||
unsigned int epid;
|
||||
|
@ -416,6 +417,8 @@ struct XHCIState {
|
|||
/* properties */
|
||||
uint32_t numports_2;
|
||||
uint32_t numports_3;
|
||||
uint32_t numintrs;
|
||||
uint32_t numslots;
|
||||
uint32_t flags;
|
||||
|
||||
/* Operational Registers */
|
||||
|
@ -815,8 +818,8 @@ static void xhci_event(XHCIState *xhci, XHCIEvent *event, int v)
|
|||
dma_addr_t erdp;
|
||||
unsigned int dp_idx;
|
||||
|
||||
if (v >= MAXINTRS) {
|
||||
DPRINTF("intr nr out of range (%d >= %d)\n", v, MAXINTRS);
|
||||
if (v >= xhci->numintrs) {
|
||||
DPRINTF("intr nr out of range (%d >= %d)\n", v, xhci->numintrs);
|
||||
return;
|
||||
}
|
||||
intr = &xhci->intr[v];
|
||||
|
@ -963,6 +966,12 @@ static void xhci_er_reset(XHCIState *xhci, int v)
|
|||
XHCIInterrupter *intr = &xhci->intr[v];
|
||||
XHCIEvRingSeg seg;
|
||||
|
||||
if (intr->erstsz == 0) {
|
||||
/* disabled */
|
||||
intr->er_start = 0;
|
||||
intr->er_size = 0;
|
||||
return;
|
||||
}
|
||||
/* cache the (sole) event ring segment location */
|
||||
if (intr->erstsz != 1) {
|
||||
fprintf(stderr, "xhci: invalid value for ERSTSZ: %d\n", intr->erstsz);
|
||||
|
@ -1008,9 +1017,6 @@ static void xhci_set_ep_state(XHCIState *xhci, XHCIEPContext *epctx,
|
|||
uint32_t state)
|
||||
{
|
||||
uint32_t ctx[5];
|
||||
if (epctx->state == state) {
|
||||
return;
|
||||
}
|
||||
|
||||
pci_dma_read(&xhci->pci_dev, epctx->pctx, ctx, sizeof(ctx));
|
||||
ctx[0] &= ~EP_STATE_MASK;
|
||||
|
@ -1039,7 +1045,7 @@ static TRBCCode xhci_enable_ep(XHCIState *xhci, unsigned int slotid,
|
|||
int i;
|
||||
|
||||
trace_usb_xhci_ep_enable(slotid, epid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
assert(epid >= 1 && epid <= 31);
|
||||
|
||||
slot = &xhci->slots[slotid-1];
|
||||
|
@ -1082,13 +1088,42 @@ static TRBCCode xhci_enable_ep(XHCIState *xhci, unsigned int slotid,
|
|||
return CC_SUCCESS;
|
||||
}
|
||||
|
||||
static int xhci_ep_nuke_one_xfer(XHCITransfer *t)
|
||||
{
|
||||
int killed = 0;
|
||||
|
||||
if (t->running_async) {
|
||||
usb_cancel_packet(&t->packet);
|
||||
t->running_async = 0;
|
||||
t->cancelled = 1;
|
||||
DPRINTF("xhci: cancelling transfer, waiting for it to complete\n");
|
||||
killed = 1;
|
||||
}
|
||||
if (t->running_retry) {
|
||||
XHCIEPContext *epctx = t->xhci->slots[t->slotid-1].eps[t->epid-1];
|
||||
if (epctx) {
|
||||
epctx->retry = NULL;
|
||||
qemu_del_timer(epctx->kick_timer);
|
||||
}
|
||||
t->running_retry = 0;
|
||||
}
|
||||
if (t->trbs) {
|
||||
g_free(t->trbs);
|
||||
}
|
||||
|
||||
t->trbs = NULL;
|
||||
t->trb_count = t->trb_alloced = 0;
|
||||
|
||||
return killed;
|
||||
}
|
||||
|
||||
static int xhci_ep_nuke_xfers(XHCIState *xhci, unsigned int slotid,
|
||||
unsigned int epid)
|
||||
{
|
||||
XHCISlot *slot;
|
||||
XHCIEPContext *epctx;
|
||||
int i, xferi, killed = 0;
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
assert(epid >= 1 && epid <= 31);
|
||||
|
||||
DPRINTF("xhci_ep_nuke_xfers(%d, %d)\n", slotid, epid);
|
||||
|
@ -1103,25 +1138,7 @@ static int xhci_ep_nuke_xfers(XHCIState *xhci, unsigned int slotid,
|
|||
|
||||
xferi = epctx->next_xfer;
|
||||
for (i = 0; i < TD_QUEUE; i++) {
|
||||
XHCITransfer *t = &epctx->transfers[xferi];
|
||||
if (t->running_async) {
|
||||
usb_cancel_packet(&t->packet);
|
||||
t->running_async = 0;
|
||||
t->cancelled = 1;
|
||||
DPRINTF("xhci: cancelling transfer %d, waiting for it to complete...\n", i);
|
||||
killed++;
|
||||
}
|
||||
if (t->running_retry) {
|
||||
t->running_retry = 0;
|
||||
epctx->retry = NULL;
|
||||
qemu_del_timer(epctx->kick_timer);
|
||||
}
|
||||
if (t->trbs) {
|
||||
g_free(t->trbs);
|
||||
}
|
||||
|
||||
t->trbs = NULL;
|
||||
t->trb_count = t->trb_alloced = 0;
|
||||
killed += xhci_ep_nuke_one_xfer(&epctx->transfers[xferi]);
|
||||
xferi = (xferi + 1) % TD_QUEUE;
|
||||
}
|
||||
return killed;
|
||||
|
@ -1134,7 +1151,7 @@ static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid,
|
|||
XHCIEPContext *epctx;
|
||||
|
||||
trace_usb_xhci_ep_disable(slotid, epid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
assert(epid >= 1 && epid <= 31);
|
||||
|
||||
slot = &xhci->slots[slotid-1];
|
||||
|
@ -1164,7 +1181,7 @@ static TRBCCode xhci_stop_ep(XHCIState *xhci, unsigned int slotid,
|
|||
XHCIEPContext *epctx;
|
||||
|
||||
trace_usb_xhci_ep_stop(slotid, epid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
|
||||
if (epid < 1 || epid > 31) {
|
||||
fprintf(stderr, "xhci: bad ep %d\n", epid);
|
||||
|
@ -1198,7 +1215,7 @@ static TRBCCode xhci_reset_ep(XHCIState *xhci, unsigned int slotid,
|
|||
USBDevice *dev;
|
||||
|
||||
trace_usb_xhci_ep_reset(slotid, epid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
|
||||
if (epid < 1 || epid > 31) {
|
||||
fprintf(stderr, "xhci: bad ep %d\n", epid);
|
||||
|
@ -1248,7 +1265,7 @@ static TRBCCode xhci_set_ep_dequeue(XHCIState *xhci, unsigned int slotid,
|
|||
XHCIEPContext *epctx;
|
||||
dma_addr_t dequeue;
|
||||
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
|
||||
if (epid < 1 || epid > 31) {
|
||||
fprintf(stderr, "xhci: bad ep %d\n", epid);
|
||||
|
@ -1281,18 +1298,22 @@ static TRBCCode xhci_set_ep_dequeue(XHCIState *xhci, unsigned int slotid,
|
|||
return CC_SUCCESS;
|
||||
}
|
||||
|
||||
static int xhci_xfer_map(XHCITransfer *xfer)
|
||||
static int xhci_xfer_create_sgl(XHCITransfer *xfer, int in_xfer)
|
||||
{
|
||||
int in_xfer = (xfer->packet.pid == USB_TOKEN_IN);
|
||||
XHCIState *xhci = xfer->xhci;
|
||||
int i;
|
||||
|
||||
xfer->int_req = false;
|
||||
pci_dma_sglist_init(&xfer->sgl, &xhci->pci_dev, xfer->trb_count);
|
||||
for (i = 0; i < xfer->trb_count; i++) {
|
||||
XHCITRB *trb = &xfer->trbs[i];
|
||||
dma_addr_t addr;
|
||||
unsigned int chunk = 0;
|
||||
|
||||
if (trb->control & TRB_TR_IOC) {
|
||||
xfer->int_req = true;
|
||||
}
|
||||
|
||||
switch (TRB_TYPE(*trb)) {
|
||||
case TR_DATA:
|
||||
if ((!(trb->control & TRB_TR_DIR)) != (!in_xfer)) {
|
||||
|
@ -1317,7 +1338,6 @@ static int xhci_xfer_map(XHCITransfer *xfer)
|
|||
}
|
||||
}
|
||||
|
||||
usb_packet_map(&xfer->packet, &xfer->sgl);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
|
@ -1435,8 +1455,10 @@ static int xhci_setup_packet(XHCITransfer *xfer)
|
|||
ep = usb_ep_get(dev, dir, xfer->epid >> 1);
|
||||
}
|
||||
|
||||
usb_packet_setup(&xfer->packet, dir, ep, xfer->trbs[0].addr);
|
||||
xhci_xfer_map(xfer);
|
||||
xhci_xfer_create_sgl(xfer, dir == USB_TOKEN_IN); /* Also sets int_req */
|
||||
usb_packet_setup(&xfer->packet, dir, ep, xfer->trbs[0].addr, false,
|
||||
xfer->int_req);
|
||||
usb_packet_map(&xfer->packet, &xfer->sgl);
|
||||
DPRINTF("xhci: setup packet pid 0x%x addr %d ep %d\n",
|
||||
xfer->packet.pid, dev->addr, ep->nr);
|
||||
return 0;
|
||||
|
@ -1641,12 +1663,13 @@ static int xhci_fire_transfer(XHCIState *xhci, XHCITransfer *xfer, XHCIEPContext
|
|||
static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid)
|
||||
{
|
||||
XHCIEPContext *epctx;
|
||||
USBEndpoint *ep = NULL;
|
||||
uint64_t mfindex;
|
||||
int length;
|
||||
int i;
|
||||
|
||||
trace_usb_xhci_ep_kick(slotid, epid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
assert(epid >= 1 && epid <= 31);
|
||||
|
||||
if (!xhci->slots[slotid-1].enabled) {
|
||||
|
@ -1734,12 +1757,14 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid
|
|||
if (epid == 1) {
|
||||
if (xhci_fire_ctl_transfer(xhci, xfer) >= 0) {
|
||||
epctx->next_xfer = (epctx->next_xfer + 1) % TD_QUEUE;
|
||||
ep = xfer->packet.ep;
|
||||
} else {
|
||||
fprintf(stderr, "xhci: error firing CTL transfer\n");
|
||||
}
|
||||
} else {
|
||||
if (xhci_fire_transfer(xhci, xfer, epctx) >= 0) {
|
||||
epctx->next_xfer = (epctx->next_xfer + 1) % TD_QUEUE;
|
||||
ep = xfer->packet.ep;
|
||||
} else {
|
||||
if (!xfer->iso_xfer) {
|
||||
fprintf(stderr, "xhci: error firing data transfer\n");
|
||||
|
@ -1756,12 +1781,15 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (ep) {
|
||||
usb_device_flush_ep_queue(ep->dev, ep);
|
||||
}
|
||||
}
|
||||
|
||||
static TRBCCode xhci_enable_slot(XHCIState *xhci, unsigned int slotid)
|
||||
{
|
||||
trace_usb_xhci_slot_enable(slotid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
xhci->slots[slotid-1].enabled = 1;
|
||||
xhci->slots[slotid-1].uport = NULL;
|
||||
memset(xhci->slots[slotid-1].eps, 0, sizeof(XHCIEPContext*)*31);
|
||||
|
@ -1774,7 +1802,7 @@ static TRBCCode xhci_disable_slot(XHCIState *xhci, unsigned int slotid)
|
|||
int i;
|
||||
|
||||
trace_usb_xhci_slot_disable(slotid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
|
||||
for (i = 1; i <= 31; i++) {
|
||||
if (xhci->slots[slotid-1].eps[i-1]) {
|
||||
|
@ -1826,7 +1854,7 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid,
|
|||
TRBCCode res;
|
||||
|
||||
trace_usb_xhci_slot_address(slotid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
|
||||
dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high);
|
||||
pci_dma_read(&xhci->pci_dev, dcbaap + 8*slotid, &poctx, sizeof(poctx));
|
||||
|
@ -1865,7 +1893,7 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid,
|
|||
return CC_USB_TRANSACTION_ERROR;
|
||||
}
|
||||
|
||||
for (i = 0; i < MAXSLOTS; i++) {
|
||||
for (i = 0; i < xhci->numslots; i++) {
|
||||
if (xhci->slots[i].uport == uport) {
|
||||
fprintf(stderr, "xhci: port %s already assigned to slot %d\n",
|
||||
uport->path, i+1);
|
||||
|
@ -1914,7 +1942,7 @@ static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid,
|
|||
TRBCCode res;
|
||||
|
||||
trace_usb_xhci_slot_configure(slotid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
|
||||
ictx = xhci_mask64(pictx);
|
||||
octx = xhci->slots[slotid-1].ctx;
|
||||
|
@ -2002,7 +2030,7 @@ static TRBCCode xhci_evaluate_slot(XHCIState *xhci, unsigned int slotid,
|
|||
uint32_t slot_ctx[4];
|
||||
|
||||
trace_usb_xhci_slot_evaluate(slotid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
|
||||
ictx = xhci_mask64(pictx);
|
||||
octx = xhci->slots[slotid-1].ctx;
|
||||
|
@ -2065,7 +2093,7 @@ static TRBCCode xhci_reset_slot(XHCIState *xhci, unsigned int slotid)
|
|||
int i;
|
||||
|
||||
trace_usb_xhci_slot_reset(slotid);
|
||||
assert(slotid >= 1 && slotid <= MAXSLOTS);
|
||||
assert(slotid >= 1 && slotid <= xhci->numslots);
|
||||
|
||||
octx = xhci->slots[slotid-1].ctx;
|
||||
|
||||
|
@ -2091,7 +2119,7 @@ static unsigned int xhci_get_slot(XHCIState *xhci, XHCIEvent *event, XHCITRB *tr
|
|||
{
|
||||
unsigned int slotid;
|
||||
slotid = (trb->control >> TRB_CR_SLOTID_SHIFT) & TRB_CR_SLOTID_MASK;
|
||||
if (slotid < 1 || slotid > MAXSLOTS) {
|
||||
if (slotid < 1 || slotid > xhci->numslots) {
|
||||
fprintf(stderr, "xhci: bad slot id %d\n", slotid);
|
||||
event->ccode = CC_TRB_ERROR;
|
||||
return 0;
|
||||
|
@ -2183,12 +2211,12 @@ static void xhci_process_commands(XHCIState *xhci)
|
|||
event.ptr = addr;
|
||||
switch (type) {
|
||||
case CR_ENABLE_SLOT:
|
||||
for (i = 0; i < MAXSLOTS; i++) {
|
||||
for (i = 0; i < xhci->numslots; i++) {
|
||||
if (!xhci->slots[i].enabled) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i >= MAXSLOTS) {
|
||||
if (i >= xhci->numslots) {
|
||||
fprintf(stderr, "xhci: no device slots available\n");
|
||||
event.ccode = CC_NO_SLOTS_ERROR;
|
||||
} else {
|
||||
|
@ -2335,7 +2363,7 @@ static void xhci_reset(DeviceState *dev)
|
|||
xhci->config = 0;
|
||||
xhci->devaddr = 2;
|
||||
|
||||
for (i = 0; i < MAXSLOTS; i++) {
|
||||
for (i = 0; i < xhci->numslots; i++) {
|
||||
xhci_disable_slot(xhci, i+1);
|
||||
}
|
||||
|
||||
|
@ -2343,7 +2371,7 @@ static void xhci_reset(DeviceState *dev)
|
|||
xhci_update_port(xhci, xhci->ports + i, 0);
|
||||
}
|
||||
|
||||
for (i = 0; i < MAXINTRS; i++) {
|
||||
for (i = 0; i < xhci->numintrs; i++) {
|
||||
xhci->intr[i].iman = 0;
|
||||
xhci->intr[i].imod = 0;
|
||||
xhci->intr[i].erstsz = 0;
|
||||
|
@ -2375,7 +2403,7 @@ static uint64_t xhci_cap_read(void *ptr, hwaddr reg, unsigned size)
|
|||
break;
|
||||
case 0x04: /* HCSPARAMS 1 */
|
||||
ret = ((xhci->numports_2+xhci->numports_3)<<24)
|
||||
| (MAXINTRS<<8) | MAXSLOTS;
|
||||
| (xhci->numintrs<<8) | xhci->numslots;
|
||||
break;
|
||||
case 0x08: /* HCSPARAMS 2 */
|
||||
ret = 0x0000000f;
|
||||
|
@ -2402,7 +2430,7 @@ static uint64_t xhci_cap_read(void *ptr, hwaddr reg, unsigned size)
|
|||
ret = 0x02000402; /* USB 2.0 */
|
||||
break;
|
||||
case 0x24: /* Supported Protocol:04 */
|
||||
ret = 0x20425455; /* "USB " */
|
||||
ret = 0x20425355; /* "USB " */
|
||||
break;
|
||||
case 0x28: /* Supported Protocol:08 */
|
||||
ret = 0x00000001 | (xhci->numports_2<<8);
|
||||
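Decoded as little-endian ASCII, the corrected constant 0x20425355 above is the byte sequence 0x55 0x53 0x42 0x20, i.e. the name string "USB " that the Supported Protocol capability is meant to report; the old value 0x20425455 spelled "UTB ".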
|
@ -2414,7 +2442,7 @@ static uint64_t xhci_cap_read(void *ptr, hwaddr reg, unsigned size)
|
|||
ret = 0x03000002; /* USB 3.0 */
|
||||
break;
|
||||
case 0x34: /* Supported Protocol:04 */
|
||||
ret = 0x20425455; /* "USB " */
|
||||
ret = 0x20425355; /* "USB " */
|
||||
break;
|
||||
case 0x38: /* Supported Protocol:08 */
|
||||
ret = 0x00000000 | (xhci->numports_2+1) | (xhci->numports_3<<8);
|
||||
|
@ -2653,7 +2681,7 @@ static void xhci_runtime_write(void *ptr, hwaddr reg,
|
|||
trace_usb_xhci_runtime_write(reg, val);
|
||||
|
||||
if (reg < 0x20) {
|
||||
fprintf(stderr, "xhci_oper_write: reg 0x%x unimplemented\n", (int)reg);
|
||||
fprintf(stderr, "%s: reg 0x%x unimplemented\n", __func__, (int)reg);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2730,7 +2758,7 @@ static void xhci_doorbell_write(void *ptr, hwaddr reg,
|
|||
(uint32_t)val);
|
||||
}
|
||||
} else {
|
||||
if (reg > MAXSLOTS) {
|
||||
if (reg > xhci->numslots) {
|
||||
fprintf(stderr, "xhci: bad doorbell %d\n", (int)reg);
|
||||
} else if (val > 31) {
|
||||
fprintf(stderr, "xhci: bad doorbell %d write: 0x%x\n",
|
||||
|
@ -2822,6 +2850,10 @@ static void xhci_complete(USBPort *port, USBPacket *packet)
|
|||
{
|
||||
XHCITransfer *xfer = container_of(packet, XHCITransfer, packet);
|
||||
|
||||
if (packet->result == USB_RET_REMOVE_FROM_QUEUE) {
|
||||
xhci_ep_nuke_one_xfer(xfer);
|
||||
return;
|
||||
}
|
||||
xhci_complete_packet(xfer, packet->result);
|
||||
xhci_kick_ep(xfer->xhci, xfer->slotid, xfer->epid);
|
||||
}
|
||||
|
@ -2832,7 +2864,7 @@ static void xhci_child_detach(USBPort *uport, USBDevice *child)
|
|||
XHCIState *xhci = container_of(bus, XHCIState, bus);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAXSLOTS; i++) {
|
||||
for (i = 0; i < xhci->numslots; i++) {
|
||||
if (xhci->slots[i].uport == uport) {
|
||||
xhci->slots[i].uport = NULL;
|
||||
}
|
||||
|
@ -2852,7 +2884,7 @@ static int xhci_find_slotid(XHCIState *xhci, USBDevice *dev)
|
|||
XHCISlot *slot;
|
||||
int slotid;
|
||||
|
||||
for (slotid = 1; slotid <= MAXSLOTS; slotid++) {
|
||||
for (slotid = 1; slotid <= xhci->numslots; slotid++) {
|
||||
slot = &xhci->slots[slotid-1];
|
||||
if (slot->devaddr == dev->addr) {
|
||||
return slotid;
|
||||
|
@ -2948,6 +2980,19 @@ static int usb_xhci_initfn(struct PCIDevice *dev)
|
|||
|
||||
usb_xhci_init(xhci, &dev->qdev);
|
||||
|
||||
if (xhci->numintrs > MAXINTRS) {
|
||||
xhci->numintrs = MAXINTRS;
|
||||
}
|
||||
if (xhci->numintrs < 1) {
|
||||
xhci->numintrs = 1;
|
||||
}
|
||||
if (xhci->numslots > MAXSLOTS) {
|
||||
xhci->numslots = MAXSLOTS;
|
||||
}
|
||||
if (xhci->numslots < 1) {
|
||||
xhci->numslots = 1;
|
||||
}
|
||||
|
||||
xhci->mfwrap_timer = qemu_new_timer_ns(vm_clock, xhci_mfwrap_timer, xhci);
|
||||
|
||||
xhci->irq = xhci->pci_dev.irq[0];
|
||||
|
@ -2984,10 +3029,10 @@ static int usb_xhci_initfn(struct PCIDevice *dev)
|
|||
assert(ret >= 0);
|
||||
|
||||
if (xhci->flags & (1 << XHCI_FLAG_USE_MSI)) {
|
||||
msi_init(&xhci->pci_dev, 0x70, MAXINTRS, true, false);
|
||||
msi_init(&xhci->pci_dev, 0x70, xhci->numintrs, true, false);
|
||||
}
|
||||
if (xhci->flags & (1 << XHCI_FLAG_USE_MSI_X)) {
|
||||
msix_init(&xhci->pci_dev, MAXINTRS,
|
||||
msix_init(&xhci->pci_dev, xhci->numintrs,
|
||||
&xhci->mem, 0, OFF_MSIX_TABLE,
|
||||
&xhci->mem, 0, OFF_MSIX_PBA,
|
||||
0x90);
|
||||
|
@ -3002,10 +3047,12 @@ static const VMStateDescription vmstate_xhci = {
|
|||
};
|
||||
|
||||
static Property xhci_properties[] = {
|
||||
DEFINE_PROP_BIT("msi", XHCIState, flags, XHCI_FLAG_USE_MSI, true),
|
||||
DEFINE_PROP_BIT("msix", XHCIState, flags, XHCI_FLAG_USE_MSI_X, true),
|
||||
DEFINE_PROP_UINT32("p2", XHCIState, numports_2, 4),
|
||||
DEFINE_PROP_UINT32("p3", XHCIState, numports_3, 4),
|
||||
DEFINE_PROP_BIT("msi", XHCIState, flags, XHCI_FLAG_USE_MSI, true),
|
||||
DEFINE_PROP_BIT("msix", XHCIState, flags, XHCI_FLAG_USE_MSI_X, true),
|
||||
DEFINE_PROP_UINT32("intrs", XHCIState, numintrs, MAXINTRS),
|
||||
DEFINE_PROP_UINT32("slots", XHCIState, numslots, MAXSLOTS),
|
||||
DEFINE_PROP_UINT32("p2", XHCIState, numports_2, 4),
|
||||
DEFINE_PROP_UINT32("p3", XHCIState, numports_3, 4),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
|
|
|
@ -1224,7 +1224,8 @@ static int usb_linux_update_endp_table(USBHostDevice *s)
|
|||
usb_ep_set_type(&s->dev, pid, ep, type);
|
||||
usb_ep_set_ifnum(&s->dev, pid, ep, interface);
|
||||
if ((s->options & (1 << USB_HOST_OPT_PIPELINE)) &&
|
||||
(type == USB_ENDPOINT_XFER_BULK)) {
|
||||
(type == USB_ENDPOINT_XFER_BULK) &&
|
||||
(pid == USB_TOKEN_OUT)) {
|
||||
usb_ep_set_pipeline(&s->dev, pid, ep, true);
|
||||
}
|
||||
|
||||
|
|
|
@ -1270,6 +1270,16 @@ static void usbredir_interface_info(void *priv,
|
|||
}
|
||||
}
|
||||
|
||||
static void usbredir_set_pipeline(USBRedirDevice *dev, struct USBEndpoint *uep)
|
||||
{
|
||||
if (uep->type != USB_ENDPOINT_XFER_BULK) {
|
||||
return;
|
||||
}
|
||||
if (uep->pid == USB_TOKEN_OUT) {
|
||||
uep->pipeline = true;
|
||||
}
|
||||
}
|
||||
|
||||
static void usbredir_ep_info(void *priv,
|
||||
struct usb_redir_ep_info_header *ep_info)
|
||||
{
|
||||
|
@ -1311,9 +1321,7 @@ static void usbredir_ep_info(void *priv,
|
|||
dev->endpoint[i].max_packet_size =
|
||||
usb_ep->max_packet_size = ep_info->max_packet_size[i];
|
||||
}
|
||||
if (ep_info->type[i] == usb_redir_type_bulk) {
|
||||
usb_ep->pipeline = true;
|
||||
}
|
||||
usbredir_set_pipeline(dev, usb_ep);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1574,9 +1582,7 @@ static int usbredir_post_load(void *priv, int version_id)
|
|||
usb_ep->type = dev->endpoint[i].type;
|
||||
usb_ep->ifnum = dev->endpoint[i].interface;
|
||||
usb_ep->max_packet_size = dev->endpoint[i].max_packet_size;
|
||||
if (dev->endpoint[i].type == usb_redir_type_bulk) {
|
||||
usb_ep->pipeline = true;
|
||||
}
|
||||
usbredir_set_pipeline(dev, usb_ep);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -248,7 +248,7 @@ static void versatile_init(ram_addr_t ram_size,
|
|||
pci_nic_init_nofail(nd, "rtl8139", NULL);
|
||||
}
|
||||
}
|
||||
if (usb_enabled) {
|
||||
if (usb_enabled(false)) {
|
||||
pci_create_simple(pci_bus, -1, "pci-ohci");
|
||||
}
|
||||
n = drive_get_max_bus(IF_SCSI);
|
||||
|
|
|
@ -150,10 +150,6 @@ int vhost_net_start(struct vhost_net *net,
|
|||
if (r < 0) {
|
||||
goto fail_notifiers;
|
||||
}
|
||||
if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
|
||||
tap_set_vnet_hdr_len(net->nc,
|
||||
sizeof(struct virtio_net_hdr_mrg_rxbuf));
|
||||
}
|
||||
|
||||
r = vhost_dev_start(&net->dev, dev);
|
||||
if (r < 0) {
|
||||
|
@ -179,9 +175,6 @@ fail:
|
|||
}
|
||||
net->nc->info->poll(net->nc, true);
|
||||
vhost_dev_stop(&net->dev, dev);
|
||||
if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
|
||||
tap_set_vnet_hdr_len(net->nc, sizeof(struct virtio_net_hdr));
|
||||
}
|
||||
fail_start:
|
||||
vhost_dev_disable_notifiers(&net->dev, dev);
|
||||
fail_notifiers:
|
||||
|
@ -199,18 +192,12 @@ void vhost_net_stop(struct vhost_net *net,
|
|||
}
|
||||
net->nc->info->poll(net->nc, true);
|
||||
vhost_dev_stop(&net->dev, dev);
|
||||
if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
|
||||
tap_set_vnet_hdr_len(net->nc, sizeof(struct virtio_net_hdr));
|
||||
}
|
||||
vhost_dev_disable_notifiers(&net->dev, dev);
|
||||
}
|
||||
|
||||
void vhost_net_cleanup(struct vhost_net *net)
|
||||
{
|
||||
vhost_dev_cleanup(&net->dev);
|
||||
if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
|
||||
tap_set_vnet_hdr_len(net->nc, sizeof(struct virtio_net_hdr));
|
||||
}
|
||||
g_free(net);
|
||||
}
|
||||
#else
|
||||
|
|
176
hw/virtio-net.c
|
@ -41,6 +41,8 @@ typedef struct VirtIONet
|
|||
int32_t tx_burst;
|
||||
int tx_waiting;
|
||||
uint32_t has_vnet_hdr;
|
||||
size_t host_hdr_len;
|
||||
size_t guest_hdr_len;
|
||||
uint8_t has_ufo;
|
||||
struct {
|
||||
VirtQueueElement elem;
|
||||
|
@ -200,16 +202,19 @@ static void virtio_net_reset(VirtIODevice *vdev)
|
|||
memset(n->vlans, 0, MAX_VLAN >> 3);
|
||||
}
|
||||
|
||||
static int peer_has_vnet_hdr(VirtIONet *n)
|
||||
static void peer_test_vnet_hdr(VirtIONet *n)
|
||||
{
|
||||
if (!n->nic->nc.peer)
|
||||
return 0;
|
||||
return;
|
||||
|
||||
if (n->nic->nc.peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP)
|
||||
return 0;
|
||||
return;
|
||||
|
||||
n->has_vnet_hdr = tap_has_vnet_hdr(n->nic->nc.peer);
|
||||
}
|
||||
|
||||
static int peer_has_vnet_hdr(VirtIONet *n)
|
||||
{
|
||||
return n->has_vnet_hdr;
|
||||
}
|
||||
|
||||
|
@ -223,15 +228,27 @@ static int peer_has_ufo(VirtIONet *n)
|
|||
return n->has_ufo;
|
||||
}
|
||||
|
||||
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
|
||||
{
|
||||
n->mergeable_rx_bufs = mergeable_rx_bufs;
|
||||
|
||||
n->guest_hdr_len = n->mergeable_rx_bufs ?
|
||||
sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
|
||||
|
||||
if (peer_has_vnet_hdr(n) &&
|
||||
tap_has_vnet_hdr_len(n->nic->nc.peer, n->guest_hdr_len)) {
|
||||
tap_set_vnet_hdr_len(n->nic->nc.peer, n->guest_hdr_len);
|
||||
n->host_hdr_len = n->guest_hdr_len;
|
||||
}
|
||||
}
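
The virtio_net_set_mrg_rx_bufs() helper added above centralizes the guest header length selection that the vhost_net start/stop paths and the receive path previously computed on their own. As a standalone, hypothetical sketch (not QEMU code), the program below uses stand-in struct layouts with the field sizes from the virtio specification to show the resulting 10-byte vs. 12-byte header length; the *_sketch names and guest_hdr_len() are invented for illustration.

/* Standalone sketch: stand-ins for the two virtio-net header layouts,
 * sized per the virtio specification, showing why guest_hdr_len is
 * 10 bytes without VIRTIO_NET_F_MRG_RXBUF and 12 bytes with it. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct virtio_net_hdr_sketch {            /* mirrors struct virtio_net_hdr */
    uint8_t flags;
    uint8_t gso_type;
    uint16_t hdr_len;
    uint16_t gso_size;
    uint16_t csum_start;
    uint16_t csum_offset;
};

struct virtio_net_hdr_mrg_rxbuf_sketch {  /* mirrors struct virtio_net_hdr_mrg_rxbuf */
    struct virtio_net_hdr_sketch hdr;
    uint16_t num_buffers;                 /* filled in once the packet is split */
};

static size_t guest_hdr_len(int mergeable_rx_bufs)
{
    /* Same selection virtio_net_set_mrg_rx_bufs() performs. */
    return mergeable_rx_bufs ? sizeof(struct virtio_net_hdr_mrg_rxbuf_sketch)
                             : sizeof(struct virtio_net_hdr_sketch);
}

int main(void)
{
    printf("guest_hdr_len without MRG_RXBUF: %zu\n", guest_hdr_len(0)); /* 10 */
    printf("guest_hdr_len with    MRG_RXBUF: %zu\n", guest_hdr_len(1)); /* 12 */
    return 0;
}
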
|
||||
|
||||
static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
|
||||
{
|
||||
VirtIONet *n = to_virtio_net(vdev);
|
||||
|
||||
features |= (1 << VIRTIO_NET_F_MAC);
|
||||
|
||||
if (peer_has_vnet_hdr(n)) {
|
||||
tap_using_vnet_hdr(n->nic->nc.peer, 1);
|
||||
} else {
|
||||
if (!peer_has_vnet_hdr(n)) {
|
||||
features &= ~(0x1 << VIRTIO_NET_F_CSUM);
|
||||
features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
|
||||
features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
|
||||
|
@ -277,7 +294,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
|
|||
{
|
||||
VirtIONet *n = to_virtio_net(vdev);
|
||||
|
||||
n->mergeable_rx_bufs = !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF));
|
||||
virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));
|
||||
|
||||
if (n->has_vnet_hdr) {
|
||||
tap_set_offload(n->nic->nc.peer,
|
||||
|
@ -499,41 +516,34 @@ static int virtio_net_has_buffers(VirtIONet *n, int bufsize)
|
|||
* cache.
|
||||
*/
|
||||
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
|
||||
const uint8_t *buf, size_t size)
|
||||
uint8_t *buf, size_t size)
|
||||
{
|
||||
if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
|
||||
(size > 27 && size < 1500) && /* normal sized MTU */
|
||||
(buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
|
||||
(buf[23] == 17) && /* ip.protocol == UDP */
|
||||
(buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
|
||||
/* FIXME this cast is evil */
|
||||
net_checksum_calculate((uint8_t *)buf, size);
|
||||
net_checksum_calculate(buf, size);
|
||||
hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
|
||||
}
|
||||
}
|
||||
|
||||
static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
|
||||
const void *buf, size_t size, size_t hdr_len)
|
||||
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
|
||||
const void *buf, size_t size)
|
||||
{
|
||||
struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)iov[0].iov_base;
|
||||
int offset = 0;
|
||||
|
||||
hdr->flags = 0;
|
||||
hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
|
||||
|
||||
if (n->has_vnet_hdr) {
|
||||
memcpy(hdr, buf, sizeof(*hdr));
|
||||
offset = sizeof(*hdr);
|
||||
work_around_broken_dhclient(hdr, buf + offset, size - offset);
|
||||
/* FIXME this cast is evil */
|
||||
void *wbuf = (void *)buf;
|
||||
work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
|
||||
size - n->host_hdr_len);
|
||||
iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
|
||||
} else {
|
||||
struct virtio_net_hdr hdr = {
|
||||
.flags = 0,
|
||||
.gso_type = VIRTIO_NET_HDR_GSO_NONE
|
||||
};
|
||||
iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
|
||||
}
|
||||
|
||||
/* We only ever receive a struct virtio_net_hdr from the tapfd,
|
||||
* but we may be passing along a larger header to the guest.
|
||||
*/
|
||||
iov[0].iov_base += hdr_len;
|
||||
iov[0].iov_len -= hdr_len;
|
||||
|
||||
return offset;
|
||||
}
|
||||
|
||||
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
|
||||
|
@ -546,9 +556,7 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
|
|||
if (n->promisc)
|
||||
return 1;
|
||||
|
||||
if (n->has_vnet_hdr) {
|
||||
ptr += sizeof(struct virtio_net_hdr);
|
||||
}
|
||||
ptr += n->host_hdr_len;
|
||||
|
||||
if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
|
||||
int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
|
||||
|
@ -592,19 +600,16 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
|
|||
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
|
||||
{
|
||||
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
|
||||
struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
|
||||
size_t guest_hdr_len, offset, i, host_hdr_len;
|
||||
struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
|
||||
struct virtio_net_hdr_mrg_rxbuf mhdr;
|
||||
unsigned mhdr_cnt = 0;
|
||||
size_t offset, i, guest_offset;
|
||||
|
||||
if (!virtio_net_can_receive(&n->nic->nc))
|
||||
return -1;
|
||||
|
||||
/* hdr_len refers to the header we supply to the guest */
|
||||
guest_hdr_len = n->mergeable_rx_bufs ?
|
||||
sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
|
||||
|
||||
|
||||
host_hdr_len = n->has_vnet_hdr ? sizeof(struct virtio_net_hdr) : 0;
|
||||
if (!virtio_net_has_buffers(n, size + guest_hdr_len - host_hdr_len))
|
||||
if (!virtio_net_has_buffers(n, size + n->guest_hdr_len - n->host_hdr_len))
|
||||
return 0;
|
||||
|
||||
if (!receive_filter(n, buf, size))
|
||||
|
@ -615,7 +620,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
|
|||
while (offset < size) {
|
||||
VirtQueueElement elem;
|
||||
int len, total;
|
||||
struct iovec sg[VIRTQUEUE_MAX_SIZE];
|
||||
const struct iovec *sg = elem.in_sg;
|
||||
|
||||
total = 0;
|
||||
|
||||
|
@ -626,7 +631,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
|
|||
"i %zd mergeable %d offset %zd, size %zd, "
|
||||
"guest hdr len %zd, host hdr len %zd guest features 0x%x",
|
||||
i, n->mergeable_rx_bufs, offset, size,
|
||||
guest_hdr_len, host_hdr_len, n->vdev.guest_features);
|
||||
n->guest_hdr_len, n->host_hdr_len, n->vdev.guest_features);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
|
@ -635,24 +640,25 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
|
|||
exit(1);
|
||||
}
|
||||
|
||||
if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != guest_hdr_len) {
|
||||
error_report("virtio-net header not in first element");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);
|
||||
|
||||
if (i == 0) {
|
||||
if (n->mergeable_rx_bufs)
|
||||
mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;
|
||||
assert(offset == 0);
|
||||
if (n->mergeable_rx_bufs) {
|
||||
mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
|
||||
sg, elem.in_num,
|
||||
offsetof(typeof(mhdr), num_buffers),
|
||||
sizeof(mhdr.num_buffers));
|
||||
}
|
||||
|
||||
offset += receive_header(n, sg, elem.in_num,
|
||||
buf + offset, size - offset, guest_hdr_len);
|
||||
total += guest_hdr_len;
|
||||
receive_header(n, sg, elem.in_num, buf, size);
|
||||
offset = n->host_hdr_len;
|
||||
total += n->guest_hdr_len;
|
||||
guest_offset = n->guest_hdr_len;
|
||||
} else {
|
||||
guest_offset = 0;
|
||||
}
|
||||
|
||||
/* copy in packet. ugh */
|
||||
len = iov_from_buf(sg, elem.in_num, 0,
|
||||
len = iov_from_buf(sg, elem.in_num, guest_offset,
|
||||
buf + offset, size - offset);
|
||||
total += len;
|
||||
offset += len;
|
||||
|
@ -665,7 +671,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
|
|||
"i %zd mergeable %d offset %zd, size %zd, "
|
||||
"guest hdr len %zd, host hdr len %zd",
|
||||
i, n->mergeable_rx_bufs,
|
||||
offset, size, guest_hdr_len, host_hdr_len);
|
||||
offset, size, n->guest_hdr_len, n->host_hdr_len);
|
||||
#endif
|
||||
return size;
|
||||
}
|
||||
|
@ -674,8 +680,11 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
|
|||
virtqueue_fill(n->rx_vq, &elem, total, i++);
|
||||
}
|
||||
|
||||
if (mhdr) {
|
||||
stw_p(&mhdr->num_buffers, i);
|
||||
if (mhdr_cnt) {
|
||||
stw_p(&mhdr.num_buffers, i);
|
||||
iov_from_buf(mhdr_sg, mhdr_cnt,
|
||||
0,
|
||||
&mhdr.num_buffers, sizeof mhdr.num_buffers);
|
||||
}
|
||||
|
||||
virtqueue_flush(n->rx_vq, i);
|
||||
|
@ -716,33 +725,35 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
|
|||
}
|
||||
|
||||
while (virtqueue_pop(vq, &elem)) {
|
||||
ssize_t ret, len = 0;
|
||||
ssize_t ret, len;
|
||||
unsigned int out_num = elem.out_num;
|
||||
struct iovec *out_sg = &elem.out_sg[0];
|
||||
unsigned hdr_len;
|
||||
struct iovec sg[VIRTQUEUE_MAX_SIZE];
|
||||
|
||||
/* hdr_len refers to the header received from the guest */
|
||||
hdr_len = n->mergeable_rx_bufs ?
|
||||
sizeof(struct virtio_net_hdr_mrg_rxbuf) :
|
||||
sizeof(struct virtio_net_hdr);
|
||||
|
||||
if (out_num < 1 || out_sg->iov_len != hdr_len) {
|
||||
if (out_num < 1) {
|
||||
error_report("virtio-net header not in first element");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* ignore the header if GSO is not supported */
|
||||
if (!n->has_vnet_hdr) {
|
||||
out_num--;
|
||||
out_sg++;
|
||||
len += hdr_len;
|
||||
} else if (n->mergeable_rx_bufs) {
|
||||
/* tapfd expects a struct virtio_net_hdr */
|
||||
hdr_len -= sizeof(struct virtio_net_hdr);
|
||||
out_sg->iov_len -= hdr_len;
|
||||
len += hdr_len;
|
||||
/*
|
||||
* If host wants to see the guest header as is, we can
|
||||
* pass it on unchanged. Otherwise, copy just the parts
|
||||
* that host is interested in.
|
||||
*/
|
||||
assert(n->host_hdr_len <= n->guest_hdr_len);
|
||||
if (n->host_hdr_len != n->guest_hdr_len) {
|
||||
unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
|
||||
out_sg, out_num,
|
||||
0, n->host_hdr_len);
|
||||
sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
|
||||
out_sg, out_num,
|
||||
n->guest_hdr_len, -1);
|
||||
out_num = sg_num;
|
||||
out_sg = sg;
|
||||
}
|
||||
|
||||
len = n->guest_hdr_len;
|
||||
|
||||
ret = qemu_sendv_packet_async(&n->nic->nc, out_sg, out_num,
|
||||
virtio_net_tx_complete);
|
||||
if (ret == 0) {
|
||||
|
@ -899,7 +910,8 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
|
|||
|
||||
qemu_get_buffer(f, n->mac, ETH_ALEN);
|
||||
n->tx_waiting = qemu_get_be32(f);
|
||||
n->mergeable_rx_bufs = qemu_get_be32(f);
|
||||
|
||||
virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));
|
||||
|
||||
if (version_id >= 3)
|
||||
n->status = qemu_get_be16(f);
|
||||
|
@ -939,7 +951,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
|
|||
}
|
||||
|
||||
if (n->has_vnet_hdr) {
|
||||
tap_using_vnet_hdr(n->nic->nc.peer, 1);
|
||||
tap_set_offload(n->nic->nc.peer,
|
||||
(n->vdev.guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
|
||||
(n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
|
||||
|
@ -1038,12 +1049,19 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
|
|||
n->status = VIRTIO_NET_S_LINK_UP;
|
||||
|
||||
n->nic = qemu_new_nic(&net_virtio_info, conf, object_get_typename(OBJECT(dev)), dev->id, n);
|
||||
peer_test_vnet_hdr(n);
|
||||
if (peer_has_vnet_hdr(n)) {
|
||||
tap_using_vnet_hdr(n->nic->nc.peer, 1);
|
||||
n->host_hdr_len = sizeof(struct virtio_net_hdr);
|
||||
} else {
|
||||
n->host_hdr_len = 0;
|
||||
}
|
||||
|
||||
qemu_format_nic_info_str(&n->nic->nc, conf->macaddr.a);
|
||||
|
||||
n->tx_waiting = 0;
|
||||
n->tx_burst = net->txburst;
|
||||
n->mergeable_rx_bufs = 0;
|
||||
virtio_net_set_mrg_rx_bufs(n, 0);
|
||||
n->promisc = 1; /* for compatibility */
|
||||
|
||||
n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
|
||||
|
|
128 hw/virtio-pci.c
|
@ -374,52 +374,39 @@ static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static uint32_t virtio_pci_config_readb(void *opaque, uint32_t addr)
|
||||
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
{
|
||||
VirtIOPCIProxy *proxy = opaque;
|
||||
uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
|
||||
if (addr < config)
|
||||
uint64_t val = 0;
|
||||
if (addr < config) {
|
||||
return virtio_ioport_read(proxy, addr);
|
||||
}
|
||||
addr -= config;
|
||||
return virtio_config_readb(proxy->vdev, addr);
|
||||
}
|
||||
|
||||
static uint32_t virtio_pci_config_readw(void *opaque, uint32_t addr)
|
||||
{
|
||||
VirtIOPCIProxy *proxy = opaque;
|
||||
uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
|
||||
uint16_t val;
|
||||
if (addr < config)
|
||||
return virtio_ioport_read(proxy, addr);
|
||||
addr -= config;
|
||||
val = virtio_config_readw(proxy->vdev, addr);
|
||||
if (virtio_is_big_endian()) {
|
||||
/*
|
||||
* virtio is odd, ioports are LE but config space is target native
|
||||
* endian. However, in qemu, all PIO is LE, so we need to re-swap
|
||||
* on BE targets
|
||||
*/
|
||||
val = bswap16(val);
|
||||
switch (size) {
|
||||
case 1:
|
||||
val = virtio_config_readb(proxy->vdev, addr);
|
||||
break;
|
||||
case 2:
|
||||
val = virtio_config_readw(proxy->vdev, addr);
|
||||
if (virtio_is_big_endian()) {
|
||||
val = bswap16(val);
|
||||
}
|
||||
break;
|
||||
case 4:
|
||||
val = virtio_config_readl(proxy->vdev, addr);
|
||||
if (virtio_is_big_endian()) {
|
||||
val = bswap32(val);
|
||||
}
|
||||
break;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
static uint32_t virtio_pci_config_readl(void *opaque, uint32_t addr)
|
||||
{
|
||||
VirtIOPCIProxy *proxy = opaque;
|
||||
uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
|
||||
uint32_t val;
|
||||
if (addr < config)
|
||||
return virtio_ioport_read(proxy, addr);
|
||||
addr -= config;
|
||||
val = virtio_config_readl(proxy->vdev, addr);
|
||||
if (virtio_is_big_endian()) {
|
||||
val = bswap32(val);
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
static void virtio_pci_config_writeb(void *opaque, uint32_t addr, uint32_t val)
|
||||
static void virtio_pci_config_write(void *opaque, hwaddr addr,
|
||||
uint64_t val, unsigned size)
|
||||
{
|
||||
VirtIOPCIProxy *proxy = opaque;
|
||||
uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
|
||||
|
@ -428,51 +415,36 @@ static void virtio_pci_config_writeb(void *opaque, uint32_t addr, uint32_t val)
|
|||
return;
|
||||
}
|
||||
addr -= config;
|
||||
virtio_config_writeb(proxy->vdev, addr, val);
|
||||
/*
|
||||
* Virtio-PCI is odd. Ioports are LE but config space is target native
|
||||
* endian.
|
||||
*/
|
||||
switch (size) {
|
||||
case 1:
|
||||
virtio_config_writeb(proxy->vdev, addr, val);
|
||||
break;
|
||||
case 2:
|
||||
if (virtio_is_big_endian()) {
|
||||
val = bswap16(val);
|
||||
}
|
||||
virtio_config_writew(proxy->vdev, addr, val);
|
||||
break;
|
||||
case 4:
|
||||
if (virtio_is_big_endian()) {
|
||||
val = bswap32(val);
|
||||
}
|
||||
virtio_config_writel(proxy->vdev, addr, val);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void virtio_pci_config_writew(void *opaque, uint32_t addr, uint32_t val)
|
||||
{
|
||||
VirtIOPCIProxy *proxy = opaque;
|
||||
uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
|
||||
if (addr < config) {
|
||||
virtio_ioport_write(proxy, addr, val);
|
||||
return;
|
||||
}
|
||||
addr -= config;
|
||||
if (virtio_is_big_endian()) {
|
||||
val = bswap16(val);
|
||||
}
|
||||
virtio_config_writew(proxy->vdev, addr, val);
|
||||
}
|
||||
|
||||
static void virtio_pci_config_writel(void *opaque, uint32_t addr, uint32_t val)
|
||||
{
|
||||
VirtIOPCIProxy *proxy = opaque;
|
||||
uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
|
||||
if (addr < config) {
|
||||
virtio_ioport_write(proxy, addr, val);
|
||||
return;
|
||||
}
|
||||
addr -= config;
|
||||
if (virtio_is_big_endian()) {
|
||||
val = bswap32(val);
|
||||
}
|
||||
virtio_config_writel(proxy->vdev, addr, val);
|
||||
}
|
||||
|
||||
static const MemoryRegionPortio virtio_portio[] = {
|
||||
{ 0, 0x10000, 1, .write = virtio_pci_config_writeb, },
|
||||
{ 0, 0x10000, 2, .write = virtio_pci_config_writew, },
|
||||
{ 0, 0x10000, 4, .write = virtio_pci_config_writel, },
|
||||
{ 0, 0x10000, 1, .read = virtio_pci_config_readb, },
|
||||
{ 0, 0x10000, 2, .read = virtio_pci_config_readw, },
|
||||
{ 0, 0x10000, 4, .read = virtio_pci_config_readl, },
|
||||
PORTIO_END_OF_LIST()
|
||||
};
|
||||
|
||||
static const MemoryRegionOps virtio_pci_config_ops = {
|
||||
.old_portio = virtio_portio,
|
||||
.read = virtio_pci_config_read,
|
||||
.write = virtio_pci_config_write,
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 4,
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
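
The virtio-pci conversion above follows the same pattern applied to ac97, vmport and xen_platform in this series: per-size .old_portio tables are folded into one read/write callback pair that dispatches on the access size, with .impl bounding the sizes the callbacks accept. The sketch below is a hypothetical, self-contained illustration of that dispatch shape; the demo_* names and the simplified region_ops_t type are stand-ins, not the real MemoryRegionOps API.

/* Standalone sketch of the old_portio -> read/write conversion pattern. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr_t;  /* stand-in for QEMU's hwaddr */

typedef struct {
    uint64_t (*read)(void *opaque, hwaddr_t addr, unsigned size);
    void (*write)(void *opaque, hwaddr_t addr, uint64_t val, unsigned size);
} region_ops_t;             /* stand-in for MemoryRegionOps */

/* Hypothetical per-size accessors, in the role of foo_readb/readw/readl. */
static uint32_t demo_readb(void *opaque, hwaddr_t addr) { return 0x11; }
static uint32_t demo_readw(void *opaque, hwaddr_t addr) { return 0x2222; }
static uint32_t demo_readl(void *opaque, hwaddr_t addr) { return 0x44444444; }

static uint64_t demo_read(void *opaque, hwaddr_t addr, unsigned size)
{
    /* Dispatch on the access size, as the converted devices now do. */
    switch (size) {
    case 1:
        return demo_readb(opaque, addr);
    case 2:
        return demo_readw(opaque, addr);
    case 4:
        return demo_readl(opaque, addr);
    default:
        return (uint64_t)-1;   /* unsupported access size */
    }
}

static void demo_write(void *opaque, hwaddr_t addr, uint64_t val, unsigned size)
{
    /* A real device would dispatch to writeb/writew/writel the same way. */
    (void)opaque; (void)addr; (void)val; (void)size;
}

static const region_ops_t demo_ops = {
    .read = demo_read,
    .write = demo_write,
};

int main(void)
{
    printf("1-byte read: 0x%llx\n", (unsigned long long)demo_ops.read(NULL, 0, 1));
    printf("4-byte read: 0x%llx\n", (unsigned long long)demo_ops.read(NULL, 0, 4));
    return 0;
}
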
|
||||
|
||||
|
|
21
hw/vmport.c
21
hw/vmport.c
|
@@ -54,7 +54,8 @@ void vmport_register(unsigned char command, IOPortReadFunc *func, void *opaque)
    port_state->opaque[command] = opaque;
}

static uint32_t vmport_ioport_read(void *opaque, uint32_t addr)
static uint64_t vmport_ioport_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    VMPortState *s = opaque;
    CPUX86State *env = cpu_single_env;
@@ -81,11 +82,12 @@ static uint32_t vmport_ioport_read(void *opaque, uint32_t addr)
    return s->func[command](s->opaque[command], addr);
}

static void vmport_ioport_write(void *opaque, uint32_t addr, uint32_t val)
static void vmport_ioport_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    CPUX86State *env = cpu_single_env;

    env->regs[R_EAX] = vmport_ioport_read(opaque, addr);
    env->regs[R_EAX] = vmport_ioport_read(opaque, addr, 4);
}

static uint32_t vmport_cmd_get_version(void *opaque, uint32_t addr)
@@ -121,13 +123,14 @@ void vmmouse_set_data(const uint32_t *data)
    env->regs[R_ESI] = data[4]; env->regs[R_EDI] = data[5];
}

static const MemoryRegionPortio vmport_portio[] = {
    {0, 1, 4, .read = vmport_ioport_read, .write = vmport_ioport_write },
    PORTIO_END_OF_LIST(),
};

static const MemoryRegionOps vmport_ops = {
    .old_portio = vmport_portio
    .read = vmport_ioport_read,
    .write = vmport_ioport_write,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static int vmport_initfn(ISADevice *dev)
@@ -228,18 +228,46 @@ static void platform_fixed_ioport_reset(void *opaque)
    platform_fixed_ioport_writeb(s, 0, 0);
}

const MemoryRegionPortio xen_platform_ioport[] = {
    { 0, 16, 4, .write = platform_fixed_ioport_writel, },
    { 0, 16, 2, .write = platform_fixed_ioport_writew, },
    { 0, 16, 1, .write = platform_fixed_ioport_writeb, },
    { 0, 16, 2, .read = platform_fixed_ioport_readw, },
    { 0, 16, 1, .read = platform_fixed_ioport_readb, },
    PORTIO_END_OF_LIST()
};
static uint64_t platform_fixed_ioport_read(void *opaque,
                                           hwaddr addr,
                                           unsigned size)
{
    switch (size) {
    case 1:
        return platform_fixed_ioport_readb(opaque, addr);
    case 2:
        return platform_fixed_ioport_readw(opaque, addr);
    default:
        return -1;
    }
}

static void platform_fixed_ioport_write(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    switch (size) {
    case 1:
        platform_fixed_ioport_writeb(opaque, addr, val);
        break;
    case 2:
        platform_fixed_ioport_writew(opaque, addr, val);
        break;
    case 4:
        platform_fixed_ioport_writel(opaque, addr, val);
        break;
    }
}

static const MemoryRegionOps platform_fixed_io_ops = {
    .old_portio = xen_platform_ioport,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .read = platform_fixed_ioport_read,
    .write = platform_fixed_ioport_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void platform_fixed_ioport_init(PCIXenPlatformState* s)
@ -28,6 +28,7 @@
|
|||
#include "qemu-log.h"
|
||||
#include "fifo.h"
|
||||
#include "ssi.h"
|
||||
#include "bitops.h"
|
||||
|
||||
#ifdef XILINX_SPIPS_ERR_DEBUG
|
||||
#define DB_PRINT(...) do { \
|
||||
|
@ -40,6 +41,8 @@
|
|||
|
||||
/* config register */
|
||||
#define R_CONFIG (0x00 / 4)
|
||||
#define IFMODE (1 << 31)
|
||||
#define ENDIAN (1 << 26)
|
||||
#define MODEFAIL_GEN_EN (1 << 17)
|
||||
#define MAN_START_COM (1 << 16)
|
||||
#define MAN_START_EN (1 << 15)
|
||||
|
@ -75,45 +78,101 @@
|
|||
#define R_SLAVE_IDLE_COUNT (0x24 / 4)
|
||||
#define R_TX_THRES (0x28 / 4)
|
||||
#define R_RX_THRES (0x2C / 4)
|
||||
#define R_TXD1 (0x80 / 4)
|
||||
#define R_TXD2 (0x84 / 4)
|
||||
#define R_TXD3 (0x88 / 4)
|
||||
|
||||
#define R_LQSPI_CFG (0xa0 / 4)
|
||||
#define R_LQSPI_CFG_RESET 0x03A002EB
|
||||
#define LQSPI_CFG_LQ_MODE (1 << 31)
|
||||
#define LQSPI_CFG_TWO_MEM (1 << 30)
|
||||
#define LQSPI_CFG_SEP_BUS (1 << 30)
|
||||
#define LQSPI_CFG_U_PAGE (1 << 28)
|
||||
#define LQSPI_CFG_MODE_EN (1 << 25)
|
||||
#define LQSPI_CFG_MODE_WIDTH 8
|
||||
#define LQSPI_CFG_MODE_SHIFT 16
|
||||
#define LQSPI_CFG_DUMMY_WIDTH 3
|
||||
#define LQSPI_CFG_DUMMY_SHIFT 8
|
||||
#define LQSPI_CFG_INST_CODE 0xFF
|
||||
|
||||
#define R_LQSPI_STS (0xA4 / 4)
|
||||
#define LQSPI_STS_WR_RECVD (1 << 1)
|
||||
|
||||
#define R_MOD_ID (0xFC / 4)
|
||||
|
||||
#define R_MAX (R_MOD_ID+1)
|
||||
|
||||
/* size of TXRX FIFOs */
|
||||
#define NUM_CS_LINES 4
|
||||
#define RXFF_A 32
|
||||
#define TXFF_A 32
|
||||
|
||||
/* 16MB per linear region */
|
||||
#define LQSPI_ADDRESS_BITS 24
|
||||
/* Bite off 4k chunks at a time */
|
||||
#define LQSPI_CACHE_SIZE 1024
|
||||
|
||||
#define SNOOP_CHECKING 0xFF
|
||||
#define SNOOP_NONE 0xFE
|
||||
#define SNOOP_STRIPING 0
|
||||
|
||||
typedef struct {
|
||||
SysBusDevice busdev;
|
||||
MemoryRegion iomem;
|
||||
MemoryRegion mmlqspi;
|
||||
|
||||
qemu_irq irq;
|
||||
int irqline;
|
||||
|
||||
qemu_irq cs_lines[NUM_CS_LINES];
|
||||
SSIBus *spi;
|
||||
uint8_t num_cs;
|
||||
uint8_t num_busses;
|
||||
|
||||
uint8_t snoop_state;
|
||||
qemu_irq *cs_lines;
|
||||
SSIBus **spi;
|
||||
|
||||
Fifo8 rx_fifo;
|
||||
Fifo8 tx_fifo;
|
||||
|
||||
uint8_t num_txrx_bytes;
|
||||
|
||||
uint32_t regs[R_MAX];
|
||||
|
||||
uint32_t lqspi_buf[LQSPI_CACHE_SIZE];
|
||||
hwaddr lqspi_cached_addr;
|
||||
} XilinxSPIPS;
|
||||
|
||||
static inline int num_effective_busses(XilinxSPIPS *s)
|
||||
{
|
||||
return (s->regs[R_LQSPI_STS] & LQSPI_CFG_SEP_BUS &&
|
||||
s->regs[R_LQSPI_STS] & LQSPI_CFG_TWO_MEM) ? s->num_busses : 1;
|
||||
}
|
||||
|
||||
static void xilinx_spips_update_cs_lines(XilinxSPIPS *s)
|
||||
{
|
||||
int i;
|
||||
int i, j;
|
||||
bool found = false;
|
||||
int field = s->regs[R_CONFIG] >> CS_SHIFT;
|
||||
|
||||
for (i = 0; i < NUM_CS_LINES; i++) {
|
||||
if (~field & (1 << i) && !found) {
|
||||
found = true;
|
||||
DB_PRINT("selecting slave %d\n", i);
|
||||
qemu_set_irq(s->cs_lines[i], 0);
|
||||
} else {
|
||||
qemu_set_irq(s->cs_lines[i], 1);
|
||||
for (i = 0; i < s->num_cs; i++) {
|
||||
for (j = 0; j < num_effective_busses(s); j++) {
|
||||
int upage = !!(s->regs[R_LQSPI_STS] & LQSPI_CFG_U_PAGE);
|
||||
int cs_to_set = (j * s->num_cs + i + upage) %
|
||||
(s->num_cs * s->num_busses);
|
||||
|
||||
if (~field & (1 << i) && !found) {
|
||||
DB_PRINT("selecting slave %d\n", i);
|
||||
qemu_set_irq(s->cs_lines[cs_to_set], 0);
|
||||
} else {
|
||||
qemu_set_irq(s->cs_lines[cs_to_set], 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (~field & (1 << i)) {
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
s->snoop_state = SNOOP_CHECKING;
|
||||
}
|
||||
}
|
||||
|
||||
static void xilinx_spips_update_ixr(XilinxSPIPS *s)
|
||||
|
@ -154,6 +213,8 @@ static void xilinx_spips_reset(DeviceState *d)
|
|||
s->regs[R_RX_THRES] = 1;
|
||||
/* FIXME: move magic number definition somewhere sensible */
|
||||
s->regs[R_MOD_ID] = 0x01090106;
|
||||
s->regs[R_LQSPI_CFG] = R_LQSPI_CFG_RESET;
|
||||
s->snoop_state = SNOOP_CHECKING;
|
||||
xilinx_spips_update_ixr(s);
|
||||
xilinx_spips_update_cs_lines(s);
|
||||
}
|
||||
|
@ -161,26 +222,68 @@ static void xilinx_spips_reset(DeviceState *d)
|
|||
static void xilinx_spips_flush_txfifo(XilinxSPIPS *s)
|
||||
{
|
||||
for (;;) {
|
||||
uint32_t r;
|
||||
uint8_t value;
|
||||
int i;
|
||||
uint8_t rx;
|
||||
uint8_t tx = 0;
|
||||
|
||||
if (fifo8_is_empty(&s->tx_fifo)) {
|
||||
s->regs[R_INTR_STATUS] |= IXR_TX_FIFO_UNDERFLOW;
|
||||
break;
|
||||
} else {
|
||||
value = fifo8_pop(&s->tx_fifo);
|
||||
for (i = 0; i < num_effective_busses(s); ++i) {
|
||||
if (!i || s->snoop_state == SNOOP_STRIPING) {
|
||||
if (fifo8_is_empty(&s->tx_fifo)) {
|
||||
s->regs[R_INTR_STATUS] |= IXR_TX_FIFO_UNDERFLOW;
|
||||
xilinx_spips_update_ixr(s);
|
||||
return;
|
||||
} else {
|
||||
tx = fifo8_pop(&s->tx_fifo);
|
||||
}
|
||||
}
|
||||
rx = ssi_transfer(s->spi[i], (uint32_t)tx);
|
||||
DB_PRINT("tx = %02x rx = %02x\n", tx, rx);
|
||||
if (!i || s->snoop_state == SNOOP_STRIPING) {
|
||||
if (fifo8_is_full(&s->rx_fifo)) {
|
||||
s->regs[R_INTR_STATUS] |= IXR_RX_FIFO_OVERFLOW;
|
||||
DB_PRINT("rx FIFO overflow");
|
||||
} else {
|
||||
fifo8_push(&s->rx_fifo, (uint8_t)rx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r = ssi_transfer(s->spi, (uint32_t)value);
|
||||
DB_PRINT("tx = %02x rx = %02x\n", value, r);
|
||||
if (fifo8_is_full(&s->rx_fifo)) {
|
||||
s->regs[R_INTR_STATUS] |= IXR_RX_FIFO_OVERFLOW;
|
||||
DB_PRINT("rx FIFO overflow");
|
||||
} else {
|
||||
fifo8_push(&s->rx_fifo, (uint8_t)r);
|
||||
switch (s->snoop_state) {
|
||||
case (SNOOP_CHECKING):
|
||||
switch (tx) { /* new instruction code */
|
||||
case 0x0b: /* dual/quad output read DOR/QOR */
|
||||
case 0x6b:
|
||||
s->snoop_state = 4;
|
||||
break;
|
||||
/* FIXME: these vary between vendor - set to spansion */
|
||||
case 0xbb: /* high performance dual read DIOR */
|
||||
s->snoop_state = 4;
|
||||
break;
|
||||
case 0xeb: /* high performance quad read QIOR */
|
||||
s->snoop_state = 6;
|
||||
break;
|
||||
default:
|
||||
s->snoop_state = SNOOP_NONE;
|
||||
}
|
||||
break;
|
||||
case (SNOOP_STRIPING):
|
||||
case (SNOOP_NONE):
|
||||
break;
|
||||
default:
|
||||
s->snoop_state--;
|
||||
}
|
||||
}
|
||||
xilinx_spips_update_ixr(s);
|
||||
}
|
||||
|
||||
static inline void rx_data_bytes(XilinxSPIPS *s, uint32_t *value, int max)
|
||||
{
|
||||
int i;
|
||||
|
||||
*value = 0;
|
||||
for (i = 0; i < max && !fifo8_is_empty(&s->rx_fifo); ++i) {
|
||||
uint32_t next = fifo8_pop(&s->rx_fifo) & 0xFF;
|
||||
*value |= next << 8 * (s->regs[R_CONFIG] & ENDIAN ? 3-i : i);
|
||||
}
|
||||
}
|
||||
|
||||
static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
|
||||
|
@ -214,7 +317,7 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
|
|||
mask = 0;
|
||||
break;
|
||||
case R_RX_DATA:
|
||||
ret = (uint32_t)fifo8_pop(&s->rx_fifo);
|
||||
rx_data_bytes(s, &ret, s->num_txrx_bytes);
|
||||
DB_PRINT("addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret);
|
||||
xilinx_spips_update_ixr(s);
|
||||
return ret;
|
||||
|
@ -224,6 +327,20 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
|
|||
|
||||
}
|
||||
|
||||
static inline void tx_data_bytes(XilinxSPIPS *s, uint32_t value, int num)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < num && !fifo8_is_full(&s->tx_fifo); ++i) {
|
||||
if (s->regs[R_CONFIG] & ENDIAN) {
|
||||
fifo8_push(&s->tx_fifo, (uint8_t)(value >> 24));
|
||||
value <<= 8;
|
||||
} else {
|
||||
fifo8_push(&s->tx_fifo, (uint8_t)value);
|
||||
value >>= 8;
|
||||
}
|
||||
}
|
||||
}
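
tx_data_bytes() and rx_data_bytes() above honour the ENDIAN bit of the CONFIG register when splitting a 32-bit register write into FIFO bytes and when reassembling popped bytes into a register read. The standalone program below (not QEMU code; split_word and join_bytes are invented names) reproduces that byte-ordering logic so the 3 - i shift in rx_data_bytes() is easier to follow.

/* Standalone illustration of the ENDIAN-dependent byte packing used by
 * tx_data_bytes()/rx_data_bytes(): ENDIAN set pushes the MSB first,
 * otherwise the LSB goes first, and reassembly mirrors the same choice. */
#include <stdint.h>
#include <stdio.h>

static void split_word(uint32_t value, int endian, uint8_t out[4])
{
    for (int i = 0; i < 4; ++i) {
        if (endian) {
            out[i] = (uint8_t)(value >> 24);   /* MSB first, like tx_data_bytes */
            value <<= 8;
        } else {
            out[i] = (uint8_t)value;           /* LSB first */
            value >>= 8;
        }
    }
}

static uint32_t join_bytes(const uint8_t in[4], int endian)
{
    uint32_t value = 0;
    for (int i = 0; i < 4; ++i) {
        /* Same shift selection as rx_data_bytes(): 3 - i when ENDIAN is set. */
        value |= (uint32_t)in[i] << 8 * (endian ? 3 - i : i);
    }
    return value;
}

int main(void)
{
    uint8_t bytes[4];
    for (int endian = 0; endian <= 1; ++endian) {
        split_word(0x12345678, endian, bytes);
        printf("ENDIAN=%d bytes: %02x %02x %02x %02x -> 0x%08x\n", endian,
               bytes[0], bytes[1], bytes[2], bytes[3], join_bytes(bytes, endian));
    }
    return 0;
}
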
|
||||
|
||||
static void xilinx_spips_write(void *opaque, hwaddr addr,
|
||||
uint64_t value, unsigned size)
|
||||
{
|
||||
|
@ -264,7 +381,16 @@ static void xilinx_spips_write(void *opaque, hwaddr addr,
|
|||
mask = 0;
|
||||
break;
|
||||
case R_TX_DATA:
|
||||
fifo8_push(&s->tx_fifo, (uint8_t)value);
|
||||
tx_data_bytes(s, (uint32_t)value, s->num_txrx_bytes);
|
||||
goto no_reg_update;
|
||||
case R_TXD1:
|
||||
tx_data_bytes(s, (uint32_t)value, 1);
|
||||
goto no_reg_update;
|
||||
case R_TXD2:
|
||||
tx_data_bytes(s, (uint32_t)value, 2);
|
||||
goto no_reg_update;
|
||||
case R_TXD3:
|
||||
tx_data_bytes(s, (uint32_t)value, 3);
|
||||
goto no_reg_update;
|
||||
}
|
||||
s->regs[addr] = (s->regs[addr] & ~mask) | (value & mask);
|
||||
|
@ -282,6 +408,81 @@ static const MemoryRegionOps spips_ops = {
|
|||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
#define LQSPI_CACHE_SIZE 1024
|
||||
|
||||
static uint64_t
|
||||
lqspi_read(void *opaque, hwaddr addr, unsigned int size)
|
||||
{
|
||||
int i;
|
||||
XilinxSPIPS *s = opaque;
|
||||
|
||||
if (addr >= s->lqspi_cached_addr &&
|
||||
addr <= s->lqspi_cached_addr + LQSPI_CACHE_SIZE - 4) {
|
||||
return s->lqspi_buf[(addr - s->lqspi_cached_addr) >> 2];
|
||||
} else {
|
||||
int flash_addr = (addr / num_effective_busses(s));
|
||||
int slave = flash_addr >> LQSPI_ADDRESS_BITS;
|
||||
int cache_entry = 0;
|
||||
|
||||
DB_PRINT("config reg status: %08x\n", s->regs[R_LQSPI_CFG]);
|
||||
|
||||
fifo8_reset(&s->tx_fifo);
|
||||
fifo8_reset(&s->rx_fifo);
|
||||
|
||||
s->regs[R_CONFIG] &= ~CS;
|
||||
s->regs[R_CONFIG] |= (~(1 << slave) << CS_SHIFT) & CS;
|
||||
xilinx_spips_update_cs_lines(s);
|
||||
|
||||
/* instruction */
|
||||
DB_PRINT("pushing read instruction: %02x\n",
|
||||
(uint8_t)(s->regs[R_LQSPI_CFG] & LQSPI_CFG_INST_CODE));
|
||||
fifo8_push(&s->tx_fifo, s->regs[R_LQSPI_CFG] & LQSPI_CFG_INST_CODE);
|
||||
/* read address */
|
||||
DB_PRINT("pushing read address %06x\n", flash_addr);
|
||||
fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 16));
|
||||
fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 8));
|
||||
fifo8_push(&s->tx_fifo, (uint8_t)flash_addr);
|
||||
/* mode bits */
|
||||
if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_MODE_EN) {
|
||||
fifo8_push(&s->tx_fifo, extract32(s->regs[R_LQSPI_CFG],
|
||||
LQSPI_CFG_MODE_SHIFT,
|
||||
LQSPI_CFG_MODE_WIDTH));
|
||||
}
|
||||
/* dummy bytes */
|
||||
for (i = 0; i < (extract32(s->regs[R_LQSPI_CFG], LQSPI_CFG_DUMMY_SHIFT,
|
||||
LQSPI_CFG_DUMMY_WIDTH)); ++i) {
|
||||
DB_PRINT("pushing dummy byte\n");
|
||||
fifo8_push(&s->tx_fifo, 0);
|
||||
}
|
||||
xilinx_spips_flush_txfifo(s);
|
||||
fifo8_reset(&s->rx_fifo);
|
||||
|
||||
DB_PRINT("starting QSPI data read\n");
|
||||
|
||||
for (i = 0; i < LQSPI_CACHE_SIZE / 4; ++i) {
|
||||
tx_data_bytes(s, 0, 4);
|
||||
xilinx_spips_flush_txfifo(s);
|
||||
rx_data_bytes(s, &s->lqspi_buf[cache_entry], 4);
|
||||
cache_entry++;
|
||||
}
|
||||
|
||||
s->regs[R_CONFIG] |= CS;
|
||||
xilinx_spips_update_cs_lines(s);
|
||||
|
||||
s->lqspi_cached_addr = addr;
|
||||
return lqspi_read(opaque, addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
static const MemoryRegionOps lqspi_ops = {
|
||||
.read = lqspi_read,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.valid = {
|
||||
.min_access_size = 4,
|
||||
.max_access_size = 4
|
||||
}
|
||||
};
|
||||
|
||||
static int xilinx_spips_init(SysBusDevice *dev)
|
||||
{
|
||||
XilinxSPIPS *s = FROM_SYSBUS(typeof(*s), dev);
|
||||
|
@ -289,18 +490,30 @@ static int xilinx_spips_init(SysBusDevice *dev)
|
|||
|
||||
DB_PRINT("inited device model\n");
|
||||
|
||||
s->spi = ssi_create_bus(&dev->qdev, "spi");
|
||||
s->spi = g_new(SSIBus *, s->num_busses);
|
||||
for (i = 0; i < s->num_busses; ++i) {
|
||||
char bus_name[16];
|
||||
snprintf(bus_name, 16, "spi%d", i);
|
||||
s->spi[i] = ssi_create_bus(&dev->qdev, bus_name);
|
||||
}
|
||||
|
||||
ssi_auto_connect_slaves(DEVICE(s), s->cs_lines, s->spi);
|
||||
s->cs_lines = g_new(qemu_irq, s->num_cs * s->num_busses);
|
||||
ssi_auto_connect_slaves(DEVICE(s), s->cs_lines, s->spi[0]);
|
||||
ssi_auto_connect_slaves(DEVICE(s), s->cs_lines, s->spi[1]);
|
||||
sysbus_init_irq(dev, &s->irq);
|
||||
for (i = 0; i < NUM_CS_LINES; ++i) {
|
||||
for (i = 0; i < s->num_cs * s->num_busses; ++i) {
|
||||
sysbus_init_irq(dev, &s->cs_lines[i]);
|
||||
}
|
||||
|
||||
memory_region_init_io(&s->iomem, &spips_ops, s, "spi", R_MAX*4);
|
||||
sysbus_init_mmio(dev, &s->iomem);
|
||||
|
||||
memory_region_init_io(&s->mmlqspi, &lqspi_ops, s, "lqspi",
|
||||
(1 << LQSPI_ADDRESS_BITS) * 2);
|
||||
sysbus_init_mmio(dev, &s->mmlqspi);
|
||||
|
||||
s->irqline = -1;
|
||||
s->lqspi_cached_addr = ~0ULL;
|
||||
|
||||
fifo8_create(&s->rx_fifo, RXFF_A);
|
||||
fifo8_create(&s->tx_fifo, TXFF_A);
|
||||
|
@ -317,18 +530,25 @@ static int xilinx_spips_post_load(void *opaque, int version_id)
|
|||
|
||||
static const VMStateDescription vmstate_xilinx_spips = {
|
||||
.name = "xilinx_spips",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.minimum_version_id_old = 1,
|
||||
.version_id = 2,
|
||||
.minimum_version_id = 2,
|
||||
.minimum_version_id_old = 2,
|
||||
.post_load = xilinx_spips_post_load,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_FIFO8(tx_fifo, XilinxSPIPS),
|
||||
VMSTATE_FIFO8(rx_fifo, XilinxSPIPS),
|
||||
VMSTATE_UINT32_ARRAY(regs, XilinxSPIPS, R_MAX),
|
||||
VMSTATE_UINT8(snoop_state, XilinxSPIPS),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
||||
static Property xilinx_spips_properties[] = {
|
||||
DEFINE_PROP_UINT8("num-busses", XilinxSPIPS, num_busses, 1),
|
||||
DEFINE_PROP_UINT8("num-ss-bits", XilinxSPIPS, num_cs, 4),
|
||||
DEFINE_PROP_UINT8("num-txrx-bytes", XilinxSPIPS, num_txrx_bytes, 1),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
static void xilinx_spips_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
@ -336,6 +556,7 @@ static void xilinx_spips_class_init(ObjectClass *klass, void *data)
|
|||
|
||||
sdc->init = xilinx_spips_init;
|
||||
dc->reset = xilinx_spips_reset;
|
||||
dc->props = xilinx_spips_properties;
|
||||
dc->vmsd = &vmstate_xilinx_spips;
|
||||
}
|
||||
|
||||
|
|
|
@ -27,6 +27,8 @@
|
|||
#include "ssi.h"
|
||||
|
||||
#define NUM_SPI_FLASHES 4
|
||||
#define NUM_QSPI_FLASHES 2
|
||||
#define NUM_QSPI_BUSSES 2
|
||||
|
||||
#define FLASH_SIZE (64 * 1024 * 1024)
|
||||
#define FLASH_SECTOR_SIZE (128 * 1024)
|
||||
|
@ -49,30 +51,43 @@ static void gem_init(NICInfo *nd, uint32_t base, qemu_irq irq)
|
|||
sysbus_connect_irq(s, 0, irq);
|
||||
}
|
||||
|
||||
static inline void zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq)
|
||||
static inline void zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq,
|
||||
bool is_qspi)
|
||||
{
|
||||
DeviceState *dev;
|
||||
SysBusDevice *busdev;
|
||||
SSIBus *spi;
|
||||
int i;
|
||||
int i, j;
|
||||
int num_busses = is_qspi ? NUM_QSPI_BUSSES : 1;
|
||||
int num_ss = is_qspi ? NUM_QSPI_FLASHES : NUM_SPI_FLASHES;
|
||||
|
||||
dev = qdev_create(NULL, "xilinx,spips");
|
||||
qdev_prop_set_uint8(dev, "num-txrx-bytes", is_qspi ? 4 : 1);
|
||||
qdev_prop_set_uint8(dev, "num-ss-bits", num_ss);
|
||||
qdev_prop_set_uint8(dev, "num-busses", num_busses);
|
||||
qdev_init_nofail(dev);
|
||||
busdev = sysbus_from_qdev(dev);
|
||||
sysbus_mmio_map(busdev, 0, base_addr);
|
||||
if (is_qspi) {
|
||||
sysbus_mmio_map(busdev, 1, 0xFC000000);
|
||||
}
|
||||
sysbus_connect_irq(busdev, 0, irq);
|
||||
|
||||
spi = (SSIBus *)qdev_get_child_bus(dev, "spi");
|
||||
|
||||
for (i = 0; i < NUM_SPI_FLASHES; ++i) {
|
||||
for (i = 0; i < num_busses; ++i) {
|
||||
char bus_name[16];
|
||||
qemu_irq cs_line;
|
||||
|
||||
dev = ssi_create_slave_no_init(spi, "m25p80");
|
||||
qdev_prop_set_string(dev, "partname", "n25q128");
|
||||
qdev_init_nofail(dev);
|
||||
snprintf(bus_name, 16, "spi%d", i);
|
||||
spi = (SSIBus *)qdev_get_child_bus(dev, bus_name);
|
||||
|
||||
cs_line = qdev_get_gpio_in(dev, 0);
|
||||
sysbus_connect_irq(busdev, i+1, cs_line);
|
||||
for (j = 0; j < num_ss; ++j) {
|
||||
dev = ssi_create_slave_no_init(spi, "m25p80");
|
||||
qdev_prop_set_string(dev, "partname", "n25q128");
|
||||
qdev_init_nofail(dev);
|
||||
|
||||
cs_line = qdev_get_gpio_in(dev, 0);
|
||||
sysbus_connect_irq(busdev, i * num_ss + j + 1, cs_line);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -147,8 +162,9 @@ static void zynq_init(QEMUMachineInitArgs *args)
|
|||
pic[n] = qdev_get_gpio_in(dev, n);
|
||||
}
|
||||
|
||||
zynq_init_spi_flashes(0xE0006000, pic[58-IRQ_OFFSET]);
|
||||
zynq_init_spi_flashes(0xE0007000, pic[81-IRQ_OFFSET]);
|
||||
zynq_init_spi_flashes(0xE0006000, pic[58-IRQ_OFFSET], false);
|
||||
zynq_init_spi_flashes(0xE0007000, pic[81-IRQ_OFFSET], false);
|
||||
zynq_init_spi_flashes(0xE000D000, pic[51-IRQ_OFFSET], true);
|
||||
|
||||
sysbus_create_simple("cadence_uart", 0xE0000000, pic[59-IRQ_OFFSET]);
|
||||
sysbus_create_simple("cadence_uart", 0xE0001000, pic[82-IRQ_OFFSET]);
|
||||
|
|
|
@ -155,10 +155,7 @@ static void lx60_reset(void *opaque)
|
|||
cpu_reset(CPU(cpu));
|
||||
}
|
||||
|
||||
static void lx_init(const LxBoardDesc *board,
|
||||
ram_addr_t ram_size, const char *boot_device,
|
||||
const char *kernel_filename, const char *kernel_cmdline,
|
||||
const char *initrd_filename, const char *cpu_model)
|
||||
static void lx_init(const LxBoardDesc *board, QEMUMachineInitArgs *args)
|
||||
{
|
||||
#ifdef TARGET_WORDS_BIGENDIAN
|
||||
int be = 1;
|
||||
|
@ -171,6 +168,9 @@ static void lx_init(const LxBoardDesc *board,
|
|||
MemoryRegion *ram, *rom, *system_io;
|
||||
DriveInfo *dinfo;
|
||||
pflash_t *flash = NULL;
|
||||
const char *cpu_model = args->cpu_model;
|
||||
const char *kernel_filename = args->kernel_filename;
|
||||
const char *kernel_cmdline = args->kernel_cmdline;
|
||||
int n;
|
||||
|
||||
if (!cpu_model) {
|
||||
|
@ -194,7 +194,7 @@ static void lx_init(const LxBoardDesc *board,
|
|||
}
|
||||
|
||||
ram = g_malloc(sizeof(*ram));
|
||||
memory_region_init_ram(ram, "lx60.dram", ram_size);
|
||||
memory_region_init_ram(ram, "lx60.dram", args->ram_size);
|
||||
vmstate_register_ram_global(ram);
|
||||
memory_region_add_subregion(system_memory, 0, ram);
|
||||
|
||||
|
@ -271,38 +271,22 @@ static void lx_init(const LxBoardDesc *board,
|
|||
|
||||
static void xtensa_lx60_init(QEMUMachineInitArgs *args)
|
||||
{
|
||||
ram_addr_t ram_size = args->ram_size;
|
||||
const char *cpu_model = args->cpu_model;
|
||||
const char *kernel_filename = args->kernel_filename;
|
||||
const char *kernel_cmdline = args->kernel_cmdline;
|
||||
const char *initrd_filename = args->initrd_filename;
|
||||
const char *boot_device = args->boot_device;
|
||||
static const LxBoardDesc lx60_board = {
|
||||
.flash_size = 0x400000,
|
||||
.flash_sector_size = 0x10000,
|
||||
.sram_size = 0x20000,
|
||||
};
|
||||
lx_init(&lx60_board, ram_size, boot_device,
|
||||
kernel_filename, kernel_cmdline,
|
||||
initrd_filename, cpu_model);
|
||||
lx_init(&lx60_board, args);
|
||||
}
|
||||
|
||||
static void xtensa_lx200_init(QEMUMachineInitArgs *args)
|
||||
{
|
||||
ram_addr_t ram_size = args->ram_size;
|
||||
const char *cpu_model = args->cpu_model;
|
||||
const char *kernel_filename = args->kernel_filename;
|
||||
const char *kernel_cmdline = args->kernel_cmdline;
|
||||
const char *initrd_filename = args->initrd_filename;
|
||||
const char *boot_device = args->boot_device;
|
||||
static const LxBoardDesc lx200_board = {
|
||||
.flash_size = 0x1000000,
|
||||
.flash_sector_size = 0x20000,
|
||||
.sram_size = 0x2000000,
|
||||
};
|
||||
lx_init(&lx200_board, ram_size, boot_device,
|
||||
kernel_filename, kernel_cmdline,
|
||||
initrd_filename, cpu_model);
|
||||
lx_init(&lx200_board, args);
|
||||
}
|
||||
|
||||
static QEMUMachine xtensa_lx60_machine = {
|
||||
|
|
|
@@ -44,16 +44,20 @@ static void sim_reset(void *opaque)
    cpu_reset(CPU(cpu));
}

static void sim_init(ram_addr_t ram_size,
                     const char *boot_device,
                     const char *kernel_filename, const char *kernel_cmdline,
                     const char *initrd_filename, const char *cpu_model)
static void xtensa_sim_init(QEMUMachineInitArgs *args)
{
    XtensaCPU *cpu = NULL;
    CPUXtensaState *env = NULL;
    MemoryRegion *ram, *rom;
    ram_addr_t ram_size = args->ram_size;
    const char *cpu_model = args->cpu_model;
    const char *kernel_filename = args->kernel_filename;
    int n;

    if (!cpu_model) {
        cpu_model = XTENSA_DEFAULT_CPU_MODEL;
    }

    for (n = 0; n < smp_cpus; n++) {
        cpu = cpu_xtensa_init(cpu_model);
        if (cpu == NULL) {
@@ -96,21 +100,6 @@ static void sim_init(ram_addr_t ram_size,
    }
}

static void xtensa_sim_init(QEMUMachineInitArgs *args)
{
    ram_addr_t ram_size = args->ram_size;
    const char *cpu_model = args->cpu_model;
    const char *kernel_filename = args->kernel_filename;
    const char *kernel_cmdline = args->kernel_cmdline;
    const char *initrd_filename = args->initrd_filename;
    const char *boot_device = args->boot_device;
    if (!cpu_model) {
        cpu_model = XTENSA_DEFAULT_CPU_MODEL;
    }
    sim_init(ram_size, boot_device, kernel_filename, kernel_cmdline,
             initrd_filename, cpu_model);
}

static QEMUMachine xtensa_sim_machine = {
    .name = "sim",
    .desc = "sim machine (" XTENSA_DEFAULT_CPU_MODEL ")",