ide: Split out BMDMA code from ATA core

The ATA core is currently heavily intertwined with BMDMA code. Let's loosen
that a bit, so we can happily replace the DMA backend with different
implementations.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Author:    Alexander Graf, 2010-12-15 00:23:00 +01:00 (committed by Kevin Wolf)
parent 6ef2ba5ea6
commit 40a6238a20
7 changed files with 446 additions and 298 deletions
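
The point of the split is that the ATA core now talks to its DMA engine only through an ops table (IDEDMAOps), so a different backend can be dropped in without touching the core. The standalone sketch below is not part of this patch; all ExampleDMA* names are made-up stand-ins for the real IDEDMA/IDEDMAOps types, kept minimal so it compiles on its own:

/* Illustration only: a stripped-down model of the ops-table indirection this
 * patch introduces.  The real IDEDMA/IDEDMAOps have more hooks. */
#include <stdio.h>

typedef struct ExampleDMA ExampleDMA;

typedef struct {
    int (*prepare_buf)(ExampleDMA *dma, int is_write);
    int (*rw_buf)(ExampleDMA *dma, int is_write);
} ExampleDMAOps;

struct ExampleDMA {
    const ExampleDMAOps *ops;   /* the backend chooses the implementation */
};

/* one possible backend, modelled loosely on BMDMA */
static int example_bmdma_prepare_buf(ExampleDMA *dma, int is_write)
{
    (void)dma;
    printf("BMDMA-style prepare_buf (is_write=%d)\n", is_write);
    return 1;
}

static int example_bmdma_rw_buf(ExampleDMA *dma, int is_write)
{
    (void)dma;
    printf("BMDMA-style rw_buf (is_write=%d)\n", is_write);
    return 1;
}

static const ExampleDMAOps example_bmdma_ops = {
    .prepare_buf = example_bmdma_prepare_buf,
    .rw_buf      = example_bmdma_rw_buf,
};

/* the "core" only ever goes through dma->ops, so swapping the backend means
 * nothing more than pointing ops at a different table */
static void example_core_transfer(ExampleDMA *dma, int is_write)
{
    dma->ops->prepare_buf(dma, is_write);
    dma->ops->rw_buf(dma, is_write);
}

int main(void)
{
    ExampleDMA dma = { .ops = &example_bmdma_ops };
    example_core_transfer(&dma, 1);
    return 0;
}

In the patch itself, bmdma_ops near the bottom of this diff is exactly such a table, and bmdma_init() installs it on the bus.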


hw/ide/pci.c

@@ -33,6 +33,253 @@
#include <hw/ide/pci.h>

#define BMDMA_PAGE_SIZE 4096

static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockDriverCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->unit = s->unit;
    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = ide_get_sector(s);
    bm->nsector = s->nsector;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}
/* return 0 if buffer completed */
static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    qemu_sglist_init(&s->sg, s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for(;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return s->io_buffer_size != 0;
            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            qemu_sglist_add(&s->sg, bm->cur_prd_addr, l);
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }
    return 1;
}
/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for(;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0)
            break;
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return 0;
            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len)
            l = bm->cur_prd_len;
        if (l > 0) {
            if (is_write) {
                cpu_physical_memory_write(bm->cur_prd_addr,
                                          s->io_buffer + s->io_buffer_index, l);
            } else {
                cpu_physical_memory_read(bm->cur_prd_addr,
                                         s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}
static int bmdma_set_unit(IDEDMA *dma, int unit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->unit = unit;

    return 0;
}

static int bmdma_add_status(IDEDMA *dma, int status)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->status |= status;

    return 0;
}

static int bmdma_set_inactive(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->status &= ~BM_STATUS_DMAING;
    bm->dma_cb = NULL;
    bm->unit = -1;

    return 0;
}
static void bmdma_restart_dma(BMDMAState *bm, int is_read)
{
    IDEState *s = bmdma_active_if(bm);

    ide_set_sector(s, bm->sector_num);
    s->io_buffer_index = 0;
    s->io_buffer_size = 0;
    s->nsector = bm->nsector;
    bm->cur_addr = bm->addr;

    if (is_read) {
        bm->dma_cb = ide_read_dma_cb;
    } else {
        bm->dma_cb = ide_write_dma_cb;
    }

    bmdma_start_dma(&bm->dma, s, bm->dma_cb);
}

static void bmdma_restart_bh(void *opaque)
{
    BMDMAState *bm = opaque;
    int is_read;

    qemu_bh_delete(bm->bh);
    bm->bh = NULL;

    is_read = !!(bm->status & BM_STATUS_RETRY_READ);

    if (bm->status & BM_STATUS_DMA_RETRY) {
        bm->status &= ~(BM_STATUS_DMA_RETRY | BM_STATUS_RETRY_READ);
        bmdma_restart_dma(bm, is_read);
    } else if (bm->status & BM_STATUS_PIO_RETRY) {
        bm->status &= ~(BM_STATUS_PIO_RETRY | BM_STATUS_RETRY_READ);
        if (is_read) {
            ide_sector_read(bmdma_active_if(bm));
        } else {
            ide_sector_write(bmdma_active_if(bm));
        }
    } else if (bm->status & BM_STATUS_RETRY_FLUSH) {
        ide_flush_cache(bmdma_active_if(bm));
    }
}
static void bmdma_restart_cb(void *opaque, int running, int reason)
{
    IDEDMA *dma = opaque;
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    if (!running)
        return;

    if (!bm->bh) {
        bm->bh = qemu_bh_new(bmdma_restart_bh, &bm->dma);
        qemu_bh_schedule(bm->bh);
    }
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma);
    }
}
static int bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = 0;
    bm->nsector = 0;

    return 0;
}

static int bmdma_start_transfer(IDEDMA *dma)
{
    return 0;
}

static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    if (bm) {
        bm->status |= BM_STATUS_INT;
    }

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}
void bmdma_cmd_writeb(void *opaque, uint32_t addr, uint32_t val)
{
    BMDMAState *bm = opaque;
@@ -55,10 +302,10 @@ void bmdma_cmd_writeb(void *opaque, uint32_t addr, uint32_t val)
             * whole DMA operation will be submitted to disk with a single
             * aio operation with preadv/pwritev.
             */
-            if (bm->aiocb) {
+            if (bm->bus->dma->aiocb) {
                qemu_aio_flush();
#ifdef DEBUG_IDE
-                if (bm->aiocb)
+                if (bm->bus->dma->aiocb)
                    printf("ide_dma_cancel: aiocb still pending");
                if (bm->status & BM_STATUS_DMAING)
                    printf("ide_dma_cancel: BM_STATUS_DMAING still pending");
@@ -70,7 +317,7 @@ void bmdma_cmd_writeb(void *opaque, uint32_t addr, uint32_t val)
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb)
-                    bm->dma_cb(bm, 0);
+                    bm->dma_cb(bmdma_active_if(bm), 0);
            }
        }
    }
@@ -198,3 +445,30 @@ void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
        ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .start_transfer = bmdma_start_transfer,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .set_unit = bmdma_set_unit,
    .add_status = bmdma_add_status,
    .set_inactive = bmdma_set_inactive,
    .restart_cb = bmdma_restart_cb,
    .reset = bmdma_reset,
};

void bmdma_init(IDEBus *bus, BMDMAState *bm)
{
    qemu_irq *irq;

    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    irq = qemu_allocate_irqs(bmdma_irq, bm, 1);
    bus->irq = *irq;
}