mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-09 02:24:58 -06:00
hw: move DMA controllers to hw/dma/, configure with default-configs/
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
ba25df88cc
commit
d2c0bd8458
11 changed files with 13 additions and 10 deletions
|
@ -5,3 +5,9 @@ common-obj-$(CONFIG_PL330) += pl330.o
|
|||
common-obj-$(CONFIG_I82374) += i82374.o
|
||||
common-obj-$(CONFIG_I8257) += i8257.o
|
||||
common-obj-$(CONFIG_XILINX_AXI) += xilinx_axidma.o
|
||||
common-obj-$(CONFIG_ETRAXFS) += etraxfs_dma.o
|
||||
common-obj-$(CONFIG_STP2000) += sparc32_dma.o
|
||||
common-obj-$(CONFIG_SUN4M) += sun4m_iommu.o
|
||||
|
||||
obj-$(CONFIG_OMAP) += omap_dma.o soc_dma.o
|
||||
obj-$(CONFIG_PXA2XX) += pxa2xx_dma.o
|
||||
|
|
781
hw/dma/etraxfs_dma.c
Normal file
781
hw/dma/etraxfs_dma.c
Normal file
|
@ -0,0 +1,781 @@
|
|||
/*
|
||||
* QEMU ETRAX DMA Controller.
|
||||
*
|
||||
* Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
#include <stdio.h>
|
||||
#include <sys/time.h>
|
||||
#include "hw/hw.h"
|
||||
#include "exec/address-spaces.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
|
||||
#include "hw/cris/etraxfs_dma.h"
|
||||
|
||||
#define D(x)
|
||||
|
||||
#define RW_DATA (0x0 / 4)
|
||||
#define RW_SAVED_DATA (0x58 / 4)
|
||||
#define RW_SAVED_DATA_BUF (0x5c / 4)
|
||||
#define RW_GROUP (0x60 / 4)
|
||||
#define RW_GROUP_DOWN (0x7c / 4)
|
||||
#define RW_CMD (0x80 / 4)
|
||||
#define RW_CFG (0x84 / 4)
|
||||
#define RW_STAT (0x88 / 4)
|
||||
#define RW_INTR_MASK (0x8c / 4)
|
||||
#define RW_ACK_INTR (0x90 / 4)
|
||||
#define R_INTR (0x94 / 4)
|
||||
#define R_MASKED_INTR (0x98 / 4)
|
||||
#define RW_STREAM_CMD (0x9c / 4)
|
||||
|
||||
#define DMA_REG_MAX (0x100 / 4)
|
||||
|
||||
/* descriptors */
|
||||
|
||||
// ------------------------------------------------------------ dma_descr_group
|
||||
typedef struct dma_descr_group {
|
||||
uint32_t next;
|
||||
unsigned eol : 1;
|
||||
unsigned tol : 1;
|
||||
unsigned bol : 1;
|
||||
unsigned : 1;
|
||||
unsigned intr : 1;
|
||||
unsigned : 2;
|
||||
unsigned en : 1;
|
||||
unsigned : 7;
|
||||
unsigned dis : 1;
|
||||
unsigned md : 16;
|
||||
struct dma_descr_group *up;
|
||||
union {
|
||||
struct dma_descr_context *context;
|
||||
struct dma_descr_group *group;
|
||||
} down;
|
||||
} dma_descr_group;
|
||||
|
||||
// ---------------------------------------------------------- dma_descr_context
|
||||
typedef struct dma_descr_context {
|
||||
uint32_t next;
|
||||
unsigned eol : 1;
|
||||
unsigned : 3;
|
||||
unsigned intr : 1;
|
||||
unsigned : 1;
|
||||
unsigned store_mode : 1;
|
||||
unsigned en : 1;
|
||||
unsigned : 7;
|
||||
unsigned dis : 1;
|
||||
unsigned md0 : 16;
|
||||
unsigned md1;
|
||||
unsigned md2;
|
||||
unsigned md3;
|
||||
unsigned md4;
|
||||
uint32_t saved_data;
|
||||
uint32_t saved_data_buf;
|
||||
} dma_descr_context;
|
||||
|
||||
// ------------------------------------------------------------- dma_descr_data
|
||||
typedef struct dma_descr_data {
|
||||
uint32_t next;
|
||||
uint32_t buf;
|
||||
unsigned eol : 1;
|
||||
unsigned : 2;
|
||||
unsigned out_eop : 1;
|
||||
unsigned intr : 1;
|
||||
unsigned wait : 1;
|
||||
unsigned : 2;
|
||||
unsigned : 3;
|
||||
unsigned in_eop : 1;
|
||||
unsigned : 4;
|
||||
unsigned md : 16;
|
||||
uint32_t after;
|
||||
} dma_descr_data;
|
||||
|
||||
/* Constants */
|
||||
enum {
|
||||
regk_dma_ack_pkt = 0x00000100,
|
||||
regk_dma_anytime = 0x00000001,
|
||||
regk_dma_array = 0x00000008,
|
||||
regk_dma_burst = 0x00000020,
|
||||
regk_dma_client = 0x00000002,
|
||||
regk_dma_copy_next = 0x00000010,
|
||||
regk_dma_copy_up = 0x00000020,
|
||||
regk_dma_data_at_eol = 0x00000001,
|
||||
regk_dma_dis_c = 0x00000010,
|
||||
regk_dma_dis_g = 0x00000020,
|
||||
regk_dma_idle = 0x00000001,
|
||||
regk_dma_intern = 0x00000004,
|
||||
regk_dma_load_c = 0x00000200,
|
||||
regk_dma_load_c_n = 0x00000280,
|
||||
regk_dma_load_c_next = 0x00000240,
|
||||
regk_dma_load_d = 0x00000140,
|
||||
regk_dma_load_g = 0x00000300,
|
||||
regk_dma_load_g_down = 0x000003c0,
|
||||
regk_dma_load_g_next = 0x00000340,
|
||||
regk_dma_load_g_up = 0x00000380,
|
||||
regk_dma_next_en = 0x00000010,
|
||||
regk_dma_next_pkt = 0x00000010,
|
||||
regk_dma_no = 0x00000000,
|
||||
regk_dma_only_at_wait = 0x00000000,
|
||||
regk_dma_restore = 0x00000020,
|
||||
regk_dma_rst = 0x00000001,
|
||||
regk_dma_running = 0x00000004,
|
||||
regk_dma_rw_cfg_default = 0x00000000,
|
||||
regk_dma_rw_cmd_default = 0x00000000,
|
||||
regk_dma_rw_intr_mask_default = 0x00000000,
|
||||
regk_dma_rw_stat_default = 0x00000101,
|
||||
regk_dma_rw_stream_cmd_default = 0x00000000,
|
||||
regk_dma_save_down = 0x00000020,
|
||||
regk_dma_save_up = 0x00000020,
|
||||
regk_dma_set_reg = 0x00000050,
|
||||
regk_dma_set_w_size1 = 0x00000190,
|
||||
regk_dma_set_w_size2 = 0x000001a0,
|
||||
regk_dma_set_w_size4 = 0x000001c0,
|
||||
regk_dma_stopped = 0x00000002,
|
||||
regk_dma_store_c = 0x00000002,
|
||||
regk_dma_store_descr = 0x00000000,
|
||||
regk_dma_store_g = 0x00000004,
|
||||
regk_dma_store_md = 0x00000001,
|
||||
regk_dma_sw = 0x00000008,
|
||||
regk_dma_update_down = 0x00000020,
|
||||
regk_dma_yes = 0x00000001
|
||||
};
|
||||
|
||||
enum dma_ch_state
|
||||
{
|
||||
RST = 1,
|
||||
STOPPED = 2,
|
||||
RUNNING = 4
|
||||
};
|
||||
|
||||
struct fs_dma_channel
|
||||
{
|
||||
qemu_irq irq;
|
||||
struct etraxfs_dma_client *client;
|
||||
|
||||
/* Internal status. */
|
||||
int stream_cmd_src;
|
||||
enum dma_ch_state state;
|
||||
|
||||
unsigned int input : 1;
|
||||
unsigned int eol : 1;
|
||||
|
||||
struct dma_descr_group current_g;
|
||||
struct dma_descr_context current_c;
|
||||
struct dma_descr_data current_d;
|
||||
|
||||
/* Control registers. */
|
||||
uint32_t regs[DMA_REG_MAX];
|
||||
};
|
||||
|
||||
struct fs_dma_ctrl
|
||||
{
|
||||
MemoryRegion mmio;
|
||||
int nr_channels;
|
||||
struct fs_dma_channel *channels;
|
||||
|
||||
QEMUBH *bh;
|
||||
};
|
||||
|
||||
static void DMA_run(void *opaque);
|
||||
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c);
|
||||
|
||||
static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
|
||||
{
|
||||
return ctrl->channels[c].regs[reg];
|
||||
}
|
||||
|
||||
static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
return channel_reg(ctrl, c, RW_CFG) & 2;
|
||||
}
|
||||
|
||||
static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
return (channel_reg(ctrl, c, RW_CFG) & 1)
|
||||
&& ctrl->channels[c].client;
|
||||
}
|
||||
|
||||
/* Map an mmio offset to a channel index; each channel owns a
   0x2000-byte register window. */
static inline int fs_channel(hwaddr addr)
{
    return addr >> 13;
}
|
||||
|
||||
#ifdef USE_THIS_DEAD_CODE
/* Load the group descriptor at RW_GROUP into current_g.
   FIXME: handle endianness. */
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_GROUP);

    cpu_physical_memory_read(addr,
                             (void *) &ctrl->channels[c].current_g,
                             sizeof ctrl->channels[c].current_g);
}

/* Debug helper: print the fields of a context descriptor. */
static void dump_c(int ch, struct dma_descr_context *c)
{
    printf("%s ch=%d\n", __func__, ch);
    printf("next=%x\n", c->next);
    printf("saved_data=%x\n", c->saved_data);
    printf("saved_data_buf=%x\n", c->saved_data_buf);
    printf("eol=%x\n", (uint32_t) c->eol);
}

/* Debug helper: print the fields of a data descriptor. */
static void dump_d(int ch, struct dma_descr_data *d)
{
    printf("%s ch=%d\n", __func__, ch);
    printf("next=%x\n", d->next);
    printf("buf=%x\n", d->buf);
    printf("after=%x\n", d->after);
    printf("intr=%x\n", (uint32_t) d->intr);
    printf("out_eop=%x\n", (uint32_t) d->out_eop);
    printf("in_eop=%x\n", (uint32_t) d->in_eop);
    printf("eol=%x\n", (uint32_t) d->eol);
}
#endif
|
||||
|
||||
static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
|
||||
|
||||
/* Load and decode. FIXME: handle endianness. */
|
||||
cpu_physical_memory_read (addr,
|
||||
(void *) &ctrl->channels[c].current_c,
|
||||
sizeof ctrl->channels[c].current_c);
|
||||
|
||||
D(dump_c(c, &ctrl->channels[c].current_c));
|
||||
/* I guess this should update the current pos. */
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA] =
|
||||
(uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
|
||||
(uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
|
||||
}
|
||||
|
||||
static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
|
||||
|
||||
/* Load and decode. FIXME: handle endianness. */
|
||||
D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
|
||||
cpu_physical_memory_read (addr,
|
||||
(void *) &ctrl->channels[c].current_d,
|
||||
sizeof ctrl->channels[c].current_d);
|
||||
|
||||
D(dump_d(c, &ctrl->channels[c].current_d));
|
||||
ctrl->channels[c].regs[RW_DATA] = addr;
|
||||
}
|
||||
|
||||
static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
|
||||
|
||||
/* Encode and store. FIXME: handle endianness. */
|
||||
D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
|
||||
D(dump_d(c, &ctrl->channels[c].current_d));
|
||||
cpu_physical_memory_write (addr,
|
||||
(void *) &ctrl->channels[c].current_c,
|
||||
sizeof ctrl->channels[c].current_c);
|
||||
}
|
||||
|
||||
static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
|
||||
|
||||
/* Encode and store. FIXME: handle endianness. */
|
||||
D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
|
||||
cpu_physical_memory_write (addr,
|
||||
(void *) &ctrl->channels[c].current_d,
|
||||
sizeof ctrl->channels[c].current_d);
|
||||
}
|
||||
|
||||
/* Stop a channel.  Intentionally a no-op for now (FIXME). */
static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
{
    /* FIXME: */
}
|
||||
|
||||
static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
if (ctrl->channels[c].client)
|
||||
{
|
||||
ctrl->channels[c].eol = 0;
|
||||
ctrl->channels[c].state = RUNNING;
|
||||
if (!ctrl->channels[c].input)
|
||||
channel_out_run(ctrl, c);
|
||||
} else
|
||||
printf("WARNING: starting DMA ch %d with no client\n", c);
|
||||
|
||||
qemu_bh_schedule_idle(ctrl->bh);
|
||||
}
|
||||
|
||||
static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
if (!channel_en(ctrl, c)
|
||||
|| channel_stopped(ctrl, c)
|
||||
|| ctrl->channels[c].state != RUNNING
|
||||
/* Only reload the current data descriptor if it has eol set. */
|
||||
|| !ctrl->channels[c].current_d.eol) {
|
||||
D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
|
||||
c, ctrl->channels[c].state,
|
||||
channel_stopped(ctrl, c),
|
||||
channel_en(ctrl,c),
|
||||
ctrl->channels[c].eol));
|
||||
D(dump_d(c, &ctrl->channels[c].current_d));
|
||||
return;
|
||||
}
|
||||
|
||||
/* Reload the current descriptor. */
|
||||
channel_load_d(ctrl, c);
|
||||
|
||||
/* If the current descriptor cleared the eol flag and we had already
|
||||
reached eol state, do the continue. */
|
||||
if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
|
||||
D(printf("continue %d ok %x\n", c,
|
||||
ctrl->channels[c].current_d.next));
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA] =
|
||||
(uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
|
||||
channel_load_d(ctrl, c);
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
|
||||
(uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
|
||||
|
||||
channel_start(ctrl, c);
|
||||
}
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
|
||||
(uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
|
||||
}
|
||||
|
||||
/* Decode and execute a stream command written to RW_STREAM_CMD. */
static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
{
    /* The command occupies the low ten bits of the register. */
    unsigned int cmd = v & ((1 << 10) - 1);

    D(printf("%s ch=%d cmd=%x\n", __func__, c, cmd));
    if (cmd & regk_dma_load_d) {
        channel_load_d(ctrl, c);
        if (cmd & regk_dma_burst) {
            channel_start(ctrl, c);
        }
    }

    if (cmd & regk_dma_load_c) {
        channel_load_c(ctrl, c);
    }
}
|
||||
|
||||
static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
D(printf("%s %d\n", __func__, c));
|
||||
ctrl->channels[c].regs[R_INTR] &=
|
||||
~(ctrl->channels[c].regs[RW_ACK_INTR]);
|
||||
|
||||
ctrl->channels[c].regs[R_MASKED_INTR] =
|
||||
ctrl->channels[c].regs[R_INTR]
|
||||
& ctrl->channels[c].regs[RW_INTR_MASK];
|
||||
|
||||
D(printf("%s: chan=%d masked_intr=%x\n", __func__,
|
||||
c,
|
||||
ctrl->channels[c].regs[R_MASKED_INTR]));
|
||||
|
||||
qemu_set_irq(ctrl->channels[c].irq,
|
||||
!!ctrl->channels[c].regs[R_MASKED_INTR]);
|
||||
}
|
||||
|
||||
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
uint32_t len;
|
||||
uint32_t saved_data_buf;
|
||||
unsigned char buf[2 * 1024];
|
||||
|
||||
struct dma_context_metadata meta;
|
||||
bool send_context = true;
|
||||
|
||||
if (ctrl->channels[c].eol)
|
||||
return 0;
|
||||
|
||||
do {
|
||||
bool out_eop;
|
||||
D(printf("ch=%d buf=%x after=%x\n",
|
||||
c,
|
||||
(uint32_t)ctrl->channels[c].current_d.buf,
|
||||
(uint32_t)ctrl->channels[c].current_d.after));
|
||||
|
||||
if (send_context) {
|
||||
if (ctrl->channels[c].client->client.metadata_push) {
|
||||
meta.metadata = ctrl->channels[c].current_d.md;
|
||||
ctrl->channels[c].client->client.metadata_push(
|
||||
ctrl->channels[c].client->client.opaque,
|
||||
&meta);
|
||||
}
|
||||
send_context = false;
|
||||
}
|
||||
|
||||
channel_load_d(ctrl, c);
|
||||
saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
|
||||
len = (uint32_t)(unsigned long)
|
||||
ctrl->channels[c].current_d.after;
|
||||
len -= saved_data_buf;
|
||||
|
||||
if (len > sizeof buf)
|
||||
len = sizeof buf;
|
||||
cpu_physical_memory_read (saved_data_buf, buf, len);
|
||||
|
||||
out_eop = ((saved_data_buf + len) ==
|
||||
ctrl->channels[c].current_d.after) &&
|
||||
ctrl->channels[c].current_d.out_eop;
|
||||
|
||||
D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
|
||||
saved_data_buf, len, out_eop));
|
||||
|
||||
if (ctrl->channels[c].client->client.push)
|
||||
ctrl->channels[c].client->client.push(
|
||||
ctrl->channels[c].client->client.opaque,
|
||||
buf, len, out_eop);
|
||||
else
|
||||
printf("WARNING: DMA ch%d dataloss,"
|
||||
" no attached client.\n", c);
|
||||
|
||||
saved_data_buf += len;
|
||||
|
||||
if (saved_data_buf == (uint32_t)(unsigned long)
|
||||
ctrl->channels[c].current_d.after) {
|
||||
/* Done. Step to next. */
|
||||
if (ctrl->channels[c].current_d.out_eop) {
|
||||
send_context = true;
|
||||
}
|
||||
if (ctrl->channels[c].current_d.intr) {
|
||||
/* data intr. */
|
||||
D(printf("signal intr %d eol=%d\n",
|
||||
len, ctrl->channels[c].current_d.eol));
|
||||
ctrl->channels[c].regs[R_INTR] |= (1 << 2);
|
||||
channel_update_irq(ctrl, c);
|
||||
}
|
||||
channel_store_d(ctrl, c);
|
||||
if (ctrl->channels[c].current_d.eol) {
|
||||
D(printf("channel %d EOL\n", c));
|
||||
ctrl->channels[c].eol = 1;
|
||||
|
||||
/* Mark the context as disabled. */
|
||||
ctrl->channels[c].current_c.dis = 1;
|
||||
channel_store_c(ctrl, c);
|
||||
|
||||
channel_stop(ctrl, c);
|
||||
} else {
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA] =
|
||||
(uint32_t)(unsigned long)ctrl->
|
||||
channels[c].current_d.next;
|
||||
/* Load new descriptor. */
|
||||
channel_load_d(ctrl, c);
|
||||
saved_data_buf = (uint32_t)(unsigned long)
|
||||
ctrl->channels[c].current_d.buf;
|
||||
}
|
||||
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
|
||||
saved_data_buf;
|
||||
D(dump_d(c, &ctrl->channels[c].current_d));
|
||||
}
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
|
||||
} while (!ctrl->channels[c].eol);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
|
||||
unsigned char *buf, int buflen, int eop)
|
||||
{
|
||||
uint32_t len;
|
||||
uint32_t saved_data_buf;
|
||||
|
||||
if (ctrl->channels[c].eol == 1)
|
||||
return 0;
|
||||
|
||||
channel_load_d(ctrl, c);
|
||||
saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
|
||||
len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
|
||||
len -= saved_data_buf;
|
||||
|
||||
if (len > buflen)
|
||||
len = buflen;
|
||||
|
||||
cpu_physical_memory_write (saved_data_buf, buf, len);
|
||||
saved_data_buf += len;
|
||||
|
||||
if (saved_data_buf ==
|
||||
(uint32_t)(unsigned long)ctrl->channels[c].current_d.after
|
||||
|| eop) {
|
||||
uint32_t r_intr = ctrl->channels[c].regs[R_INTR];
|
||||
|
||||
D(printf("in dscr end len=%d\n",
|
||||
ctrl->channels[c].current_d.after
|
||||
- ctrl->channels[c].current_d.buf));
|
||||
ctrl->channels[c].current_d.after = saved_data_buf;
|
||||
|
||||
/* Done. Step to next. */
|
||||
if (ctrl->channels[c].current_d.intr) {
|
||||
/* TODO: signal eop to the client. */
|
||||
/* data intr. */
|
||||
ctrl->channels[c].regs[R_INTR] |= 3;
|
||||
}
|
||||
if (eop) {
|
||||
ctrl->channels[c].current_d.in_eop = 1;
|
||||
ctrl->channels[c].regs[R_INTR] |= 8;
|
||||
}
|
||||
if (r_intr != ctrl->channels[c].regs[R_INTR])
|
||||
channel_update_irq(ctrl, c);
|
||||
|
||||
channel_store_d(ctrl, c);
|
||||
D(dump_d(c, &ctrl->channels[c].current_d));
|
||||
|
||||
if (ctrl->channels[c].current_d.eol) {
|
||||
D(printf("channel %d EOL\n", c));
|
||||
ctrl->channels[c].eol = 1;
|
||||
|
||||
/* Mark the context as disabled. */
|
||||
ctrl->channels[c].current_c.dis = 1;
|
||||
channel_store_c(ctrl, c);
|
||||
|
||||
channel_stop(ctrl, c);
|
||||
} else {
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA] =
|
||||
(uint32_t)(unsigned long)ctrl->
|
||||
channels[c].current_d.next;
|
||||
/* Load new descriptor. */
|
||||
channel_load_d(ctrl, c);
|
||||
saved_data_buf = (uint32_t)(unsigned long)
|
||||
ctrl->channels[c].current_d.buf;
|
||||
}
|
||||
}
|
||||
|
||||
ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
|
||||
return len;
|
||||
}
|
||||
|
||||
static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
if (ctrl->channels[c].client->client.pull) {
|
||||
ctrl->channels[c].client->client.pull(
|
||||
ctrl->channels[c].client->client.opaque);
|
||||
return 1;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Reads must be 32 bits wide; anything narrower is fatal. */
static uint32_t dma_rinvalid (void *opaque, hwaddr addr)
{
    hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr);
    return 0;
}
|
||||
|
||||
static uint64_t
|
||||
dma_read(void *opaque, hwaddr addr, unsigned int size)
|
||||
{
|
||||
struct fs_dma_ctrl *ctrl = opaque;
|
||||
int c;
|
||||
uint32_t r = 0;
|
||||
|
||||
if (size != 4) {
|
||||
dma_rinvalid(opaque, addr);
|
||||
}
|
||||
|
||||
/* Make addr relative to this channel and bounded to nr regs. */
|
||||
c = fs_channel(addr);
|
||||
addr &= 0xff;
|
||||
addr >>= 2;
|
||||
switch (addr)
|
||||
{
|
||||
case RW_STAT:
|
||||
r = ctrl->channels[c].state & 7;
|
||||
r |= ctrl->channels[c].eol << 5;
|
||||
r |= ctrl->channels[c].stream_cmd_src << 8;
|
||||
break;
|
||||
|
||||
default:
|
||||
r = ctrl->channels[c].regs[addr];
|
||||
D(printf ("%s c=%d addr=" TARGET_FMT_plx "\n",
|
||||
__func__, c, addr));
|
||||
break;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
static void
|
||||
dma_winvalid (void *opaque, hwaddr addr, uint32_t value)
|
||||
{
|
||||
hw_error("Unsupported short waccess. reg=" TARGET_FMT_plx "\n", addr);
|
||||
}
|
||||
|
||||
static void
|
||||
dma_update_state(struct fs_dma_ctrl *ctrl, int c)
|
||||
{
|
||||
if (ctrl->channels[c].regs[RW_CFG] & 2)
|
||||
ctrl->channels[c].state = STOPPED;
|
||||
if (!(ctrl->channels[c].regs[RW_CFG] & 1))
|
||||
ctrl->channels[c].state = RST;
|
||||
}
|
||||
|
||||
static void
|
||||
dma_write(void *opaque, hwaddr addr,
|
||||
uint64_t val64, unsigned int size)
|
||||
{
|
||||
struct fs_dma_ctrl *ctrl = opaque;
|
||||
uint32_t value = val64;
|
||||
int c;
|
||||
|
||||
if (size != 4) {
|
||||
dma_winvalid(opaque, addr, value);
|
||||
}
|
||||
|
||||
/* Make addr relative to this channel and bounded to nr regs. */
|
||||
c = fs_channel(addr);
|
||||
addr &= 0xff;
|
||||
addr >>= 2;
|
||||
switch (addr)
|
||||
{
|
||||
case RW_DATA:
|
||||
ctrl->channels[c].regs[addr] = value;
|
||||
break;
|
||||
|
||||
case RW_CFG:
|
||||
ctrl->channels[c].regs[addr] = value;
|
||||
dma_update_state(ctrl, c);
|
||||
break;
|
||||
case RW_CMD:
|
||||
/* continue. */
|
||||
if (value & ~1)
|
||||
printf("Invalid store to ch=%d RW_CMD %x\n",
|
||||
c, value);
|
||||
ctrl->channels[c].regs[addr] = value;
|
||||
channel_continue(ctrl, c);
|
||||
break;
|
||||
|
||||
case RW_SAVED_DATA:
|
||||
case RW_SAVED_DATA_BUF:
|
||||
case RW_GROUP:
|
||||
case RW_GROUP_DOWN:
|
||||
ctrl->channels[c].regs[addr] = value;
|
||||
break;
|
||||
|
||||
case RW_ACK_INTR:
|
||||
case RW_INTR_MASK:
|
||||
ctrl->channels[c].regs[addr] = value;
|
||||
channel_update_irq(ctrl, c);
|
||||
if (addr == RW_ACK_INTR)
|
||||
ctrl->channels[c].regs[RW_ACK_INTR] = 0;
|
||||
break;
|
||||
|
||||
case RW_STREAM_CMD:
|
||||
if (value & ~1023)
|
||||
printf("Invalid store to ch=%d "
|
||||
"RW_STREAMCMD %x\n",
|
||||
c, value);
|
||||
ctrl->channels[c].regs[addr] = value;
|
||||
D(printf("stream_cmd ch=%d\n", c));
|
||||
channel_stream_cmd(ctrl, c, value);
|
||||
break;
|
||||
|
||||
default:
|
||||
D(printf ("%s c=%d " TARGET_FMT_plx "\n",
|
||||
__func__, c, addr));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static const MemoryRegionOps dma_ops = {
|
||||
.read = dma_read,
|
||||
.write = dma_write,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.valid = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 4
|
||||
}
|
||||
};
|
||||
|
||||
static int etraxfs_dmac_run(void *opaque)
|
||||
{
|
||||
struct fs_dma_ctrl *ctrl = opaque;
|
||||
int i;
|
||||
int p = 0;
|
||||
|
||||
for (i = 0;
|
||||
i < ctrl->nr_channels;
|
||||
i++)
|
||||
{
|
||||
if (ctrl->channels[i].state == RUNNING)
|
||||
{
|
||||
if (ctrl->channels[i].input) {
|
||||
p += channel_in_run(ctrl, i);
|
||||
} else {
|
||||
p += channel_out_run(ctrl, i);
|
||||
}
|
||||
}
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
int etraxfs_dmac_input(struct etraxfs_dma_client *client,
|
||||
void *buf, int len, int eop)
|
||||
{
|
||||
return channel_in_process(client->ctrl, client->channel,
|
||||
buf, len, eop);
|
||||
}
|
||||
|
||||
/* Connect an IRQ line with a channel. */
|
||||
void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
|
||||
{
|
||||
struct fs_dma_ctrl *ctrl = opaque;
|
||||
ctrl->channels[c].irq = *line;
|
||||
ctrl->channels[c].input = input;
|
||||
}
|
||||
|
||||
void etraxfs_dmac_connect_client(void *opaque, int c,
|
||||
struct etraxfs_dma_client *cl)
|
||||
{
|
||||
struct fs_dma_ctrl *ctrl = opaque;
|
||||
cl->ctrl = ctrl;
|
||||
cl->channel = c;
|
||||
ctrl->channels[c].client = cl;
|
||||
}
|
||||
|
||||
|
||||
static void DMA_run(void *opaque)
|
||||
{
|
||||
struct fs_dma_ctrl *etraxfs_dmac = opaque;
|
||||
int p = 1;
|
||||
|
||||
if (runstate_is_running())
|
||||
p = etraxfs_dmac_run(etraxfs_dmac);
|
||||
|
||||
if (p)
|
||||
qemu_bh_schedule_idle(etraxfs_dmac->bh);
|
||||
}
|
||||
|
||||
/* Create a DMA controller with nr_channels channels and map its
   register window at base.  Returns the opaque controller handle. */
void *etraxfs_dmac_init(hwaddr base, int nr_channels)
{
    struct fs_dma_ctrl *ctrl = g_malloc0(sizeof *ctrl);

    ctrl->bh = qemu_bh_new(DMA_run, ctrl);

    ctrl->nr_channels = nr_channels;
    ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);

    /* Each channel owns a 0x2000-byte slice of the window. */
    memory_region_init_io(&ctrl->mmio, &dma_ops, ctrl, "etraxfs-dma",
                          nr_channels * 0x2000);
    memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio);

    return ctrl;
}
|
2101
hw/dma/omap_dma.c
Normal file
2101
hw/dma/omap_dma.c
Normal file
File diff suppressed because it is too large
Load diff
574
hw/dma/pxa2xx_dma.c
Normal file
574
hw/dma/pxa2xx_dma.c
Normal file
|
@ -0,0 +1,574 @@
|
|||
/*
|
||||
* Intel XScale PXA255/270 DMA controller.
|
||||
*
|
||||
* Copyright (c) 2006 Openedhand Ltd.
|
||||
* Copyright (c) 2006 Thorsten Zitterell
|
||||
* Written by Andrzej Zaborowski <balrog@zabor.org>
|
||||
*
|
||||
* This code is licensed under the GPL.
|
||||
*/
|
||||
|
||||
#include "hw/hw.h"
|
||||
#include "hw/arm/pxa.h"
|
||||
#include "hw/sysbus.h"
|
||||
|
||||
#define PXA255_DMA_NUM_CHANNELS 16
|
||||
#define PXA27X_DMA_NUM_CHANNELS 32
|
||||
|
||||
#define PXA2XX_DMA_NUM_REQUESTS 75
|
||||
|
||||
typedef struct {
|
||||
uint32_t descr;
|
||||
uint32_t src;
|
||||
uint32_t dest;
|
||||
uint32_t cmd;
|
||||
uint32_t state;
|
||||
int request;
|
||||
} PXA2xxDMAChannel;
|
||||
|
||||
typedef struct PXA2xxDMAState {
|
||||
SysBusDevice busdev;
|
||||
MemoryRegion iomem;
|
||||
qemu_irq irq;
|
||||
|
||||
uint32_t stopintr;
|
||||
uint32_t eorintr;
|
||||
uint32_t rasintr;
|
||||
uint32_t startintr;
|
||||
uint32_t endintr;
|
||||
|
||||
uint32_t align;
|
||||
uint32_t pio;
|
||||
|
||||
int channels;
|
||||
PXA2xxDMAChannel *chan;
|
||||
|
||||
uint8_t req[PXA2XX_DMA_NUM_REQUESTS];
|
||||
|
||||
/* Flag to avoid recursive DMA invocations. */
|
||||
int running;
|
||||
} PXA2xxDMAState;
|
||||
|
||||
#define DCSR0 0x0000 /* DMA Control / Status register for Channel 0 */
|
||||
#define DCSR31 0x007c /* DMA Control / Status register for Channel 31 */
|
||||
#define DALGN 0x00a0 /* DMA Alignment register */
|
||||
#define DPCSR 0x00a4 /* DMA Programmed I/O Control Status register */
|
||||
#define DRQSR0 0x00e0 /* DMA DREQ<0> Status register */
|
||||
#define DRQSR1 0x00e4 /* DMA DREQ<1> Status register */
|
||||
#define DRQSR2 0x00e8 /* DMA DREQ<2> Status register */
|
||||
#define DINT 0x00f0 /* DMA Interrupt register */
|
||||
#define DRCMR0 0x0100 /* Request to Channel Map register 0 */
|
||||
#define DRCMR63 0x01fc /* Request to Channel Map register 63 */
|
||||
#define D_CH0 0x0200 /* Channel 0 Descriptor start */
|
||||
#define DRCMR64 0x1100 /* Request to Channel Map register 64 */
|
||||
#define DRCMR74 0x1128 /* Request to Channel Map register 74 */
|
||||
|
||||
/* Per-channel register */
|
||||
#define DDADR 0x00
|
||||
#define DSADR 0x01
|
||||
#define DTADR 0x02
|
||||
#define DCMD 0x03
|
||||
|
||||
/* Bit-field masks */
|
||||
#define DRCMR_CHLNUM 0x1f
|
||||
#define DRCMR_MAPVLD (1 << 7)
|
||||
#define DDADR_STOP (1 << 0)
|
||||
#define DDADR_BREN (1 << 1)
|
||||
#define DCMD_LEN 0x1fff
|
||||
#define DCMD_WIDTH(x) (1 << ((((x) >> 14) & 3) - 1))
|
||||
#define DCMD_SIZE(x) (4 << (((x) >> 16) & 3))
|
||||
#define DCMD_FLYBYT (1 << 19)
|
||||
#define DCMD_FLYBYS (1 << 20)
|
||||
#define DCMD_ENDIRQEN (1 << 21)
|
||||
#define DCMD_STARTIRQEN (1 << 22)
|
||||
#define DCMD_CMPEN (1 << 25)
|
||||
#define DCMD_FLOWTRG (1 << 28)
|
||||
#define DCMD_FLOWSRC (1 << 29)
|
||||
#define DCMD_INCTRGADDR (1 << 30)
|
||||
#define DCMD_INCSRCADDR (1 << 31)
|
||||
#define DCSR_BUSERRINTR (1 << 0)
|
||||
#define DCSR_STARTINTR (1 << 1)
|
||||
#define DCSR_ENDINTR (1 << 2)
|
||||
#define DCSR_STOPINTR (1 << 3)
|
||||
#define DCSR_RASINTR (1 << 4)
|
||||
#define DCSR_REQPEND (1 << 8)
|
||||
#define DCSR_EORINT (1 << 9)
|
||||
#define DCSR_CMPST (1 << 10)
|
||||
#define DCSR_MASKRUN (1 << 22)
|
||||
#define DCSR_RASIRQEN (1 << 23)
|
||||
#define DCSR_CLRCMPST (1 << 24)
|
||||
#define DCSR_SETCMPST (1 << 25)
|
||||
#define DCSR_EORSTOPEN (1 << 26)
|
||||
#define DCSR_EORJMPEN (1 << 27)
|
||||
#define DCSR_EORIRQEN (1 << 28)
|
||||
#define DCSR_STOPIRQEN (1 << 29)
|
||||
#define DCSR_NODESCFETCH (1 << 30)
|
||||
#define DCSR_RUN (1 << 31)
|
||||
|
||||
/* Recompute the per-channel interrupt summary bits (for channel CH,
   or none when CH < 0) and drive the shared IRQ line. */
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        uint32_t bit = 1 << ch;
        uint32_t state = s->chan[ch].state;

        if ((state & DCSR_STOPIRQEN) && (state & DCSR_STOPINTR))
            s->stopintr |= bit;
        else
            s->stopintr &= ~bit;

        if ((state & DCSR_EORIRQEN) && (state & DCSR_EORINT))
            s->eorintr |= bit;
        else
            s->eorintr &= ~bit;

        if ((state & DCSR_RASIRQEN) && (state & DCSR_RASINTR))
            s->rasintr |= bit;
        else
            s->rasintr &= ~bit;

        if (state & DCSR_STARTINTR)
            s->startintr |= bit;
        else
            s->startintr &= ~bit;

        if (state & DCSR_ENDINTR)
            s->endintr |= bit;
        else
            s->endintr &= ~bit;
    }

    /* The IRQ line is the OR of every pending interrupt class. */
    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}
|
||||
|
||||
static inline void pxa2xx_dma_descriptor_fetch(
|
||||
PXA2xxDMAState *s, int ch)
|
||||
{
|
||||
uint32_t desc[4];
|
||||
hwaddr daddr = s->chan[ch].descr & ~0xf;
|
||||
if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
|
||||
daddr += 32;
|
||||
|
||||
cpu_physical_memory_read(daddr, (uint8_t *) desc, 16);
|
||||
s->chan[ch].descr = desc[DDADR];
|
||||
s->chan[ch].src = desc[DSADR];
|
||||
s->chan[ch].dest = desc[DTADR];
|
||||
s->chan[ch].cmd = desc[DCMD];
|
||||
|
||||
if (s->chan[ch].cmd & DCMD_FLOWSRC)
|
||||
s->chan[ch].src &= ~3;
|
||||
if (s->chan[ch].cmd & DCMD_FLOWTRG)
|
||||
s->chan[ch].dest &= ~3;
|
||||
|
||||
if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
|
||||
printf("%s: unsupported mode in channel %i\n", __FUNCTION__, ch);
|
||||
|
||||
if (s->chan[ch].cmd & DCMD_STARTIRQEN)
|
||||
s->chan[ch].state |= DCSR_STARTINTR;
|
||||
}
|
||||
|
||||
/*
 * Execute all runnable channels until no further progress is possible.
 * Transfers are performed synchronously, in bursts of up to 32 bytes
 * (the bounce buffer size).  The s->running counter makes re-entrant
 * calls (e.g. from pxa2xx_dma_descriptor_fetch paths or request
 * handlers invoked while running) fold into the outer invocation.
 */
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    /* Already running: the outer call will notice any new work. */
    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                /* Addresses only advance when the increment bit is set. */
                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    /* One burst: read into the bounce buffer... */
                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    /* ...then write it out. */
                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    /* Flow-controlled transfer whose request line dropped:
                     * record end-of-receive and optionally stop or chain to
                     * the next descriptor. */
                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                            !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                                !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                /* Write the remaining byte count back into DCMD. */
                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                            (ch->descr & DDADR_STOP) ||
                            (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}
||||
/*
 * MMIO read dispatch for the DMA register window.  Only 32-bit accesses
 * are valid.  Fixed registers are handled by the switch; the per-channel
 * descriptor registers (DDADR/DSADR/DTADR/DCMD) live in a 16-byte-strided
 * array starting at D_CH0 and are decoded below the switch.
 */
static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
                unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        hw_error("%s: Bad access width\n", __FUNCTION__);
        /* hw_error() does not return; this only satisfies the compiler. */
        return 5;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        /* The upper request-mapping registers are at a discontiguous
         * offset; rebase them onto the DRCMR0 index space. */
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        /* REQPEND reflects the live request line, not stored state. */
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        /* Aggregated per-channel interrupt status. */
        return s->stopintr | s->eorintr | s->rasintr |
                s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }

    hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __FUNCTION__, offset);
    return 7;
}
||||
/*
 * MMIO write dispatch for the DMA register window (32-bit accesses only).
 * Writes to DCSR may clear write-one-to-clear status bits, start the
 * channel (with or without a descriptor fetch) and update the IRQ line.
 */
static void pxa2xx_dma_write(void *opaque, hwaddr offset,
                uint64_t value, unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        hw_error("%s: Bad access width\n", __FUNCTION__);
        return;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        /* Rebase the discontiguous upper mapping registers onto the
         * DRCMR0 index space. */
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        /* NOTE(review): this guard accepts CHLNUM == s->channels, which
         * would index one past the end of s->chan[] when the mapped
         * request later fires — suspected off-by-one; should likely
         * be ">=".  Verify against callers before changing. */
        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) > s->channels)
                hw_error("%s: Bad DMA channel %i\n",
                         __FUNCTION__, (unsigned)value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        /* Low status bits are write-one-to-clear; the control bits in
         * the top byte are latched from the written value as-is. */
        s->chan[channel].state &= 0x0000071f & ~(value &
                        (DCSR_EORINT | DCSR_ENDINTR |
                         DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous. */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        /* Per-channel descriptor registers, 16-byte stride from D_CH0. */
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }

            break;
        }
    fail:
        hw_error("%s: Bad offset " TARGET_FMT_plx "\n", __FUNCTION__, offset);
    }
}
||||
/* MMIO access callbacks for the 64KiB "pxa2xx.dma" register window. */
static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
||||
static void pxa2xx_dma_request(void *opaque, int req_num, int on)
|
||||
{
|
||||
PXA2xxDMAState *s = opaque;
|
||||
int ch;
|
||||
if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
|
||||
hw_error("%s: Bad DMA request %i\n", __FUNCTION__, req_num);
|
||||
|
||||
if (!(s->req[req_num] & DRCMR_MAPVLD))
|
||||
return;
|
||||
ch = s->req[req_num] & DRCMR_CHLNUM;
|
||||
|
||||
if (!s->chan[ch].request && on)
|
||||
s->chan[ch].state |= DCSR_RASINTR;
|
||||
else
|
||||
s->chan[ch].state &= ~DCSR_RASINTR;
|
||||
if (s->chan[ch].request && !on)
|
||||
s->chan[ch].state |= DCSR_EORINT;
|
||||
|
||||
s->chan[ch].request = on;
|
||||
if (on) {
|
||||
pxa2xx_dma_run(s);
|
||||
pxa2xx_dma_update(s, ch);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * sysbus init: allocate the per-channel array sized by the "channels"
 * property (set by the board via pxa2{55,7x}_dma_init), register the
 * request GPIO inputs and map the 64KiB register window.
 * Returns -1 if "channels" was left unconfigured.
 */
static int pxa2xx_dma_init(SysBusDevice *dev)
{
    int i;
    PXA2xxDMAState *s;
    s = FROM_SYSBUS(PXA2xxDMAState, dev);

    if (s->channels <= 0) {
        return -1;
    }

    /* g_malloc0() zero-fills; the previous extra memset() of the channel
     * array was redundant and has been dropped. */
    s->chan = g_malloc0(sizeof(PXA2xxDMAChannel) * s->channels);

    /* All channels start stopped. */
    for (i = 0; i < s->channels; i ++)
        s->chan[i].state = DCSR_STOPINTR;

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    qdev_init_gpio_in(&dev->qdev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);

    memory_region_init_io(&s->iomem, &pxa2xx_dma_ops, s,
                          "pxa2xx.dma", 0x00010000);
    sysbus_init_mmio(dev, &s->iomem);
    sysbus_init_irq(dev, &s->irq);

    return 0;
}
||||
/* Create, configure and map a PXA27x DMA controller at @base. */
DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev = qdev_create(NULL, "pxa2xx-dma");

    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}
||||
/* Create, configure and map a PXA255 DMA controller at @base. */
DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_create(NULL, "pxa2xx-dma");
    /* Bug fix: the PXA255 DMA controller has 16 channels
     * (PXA255_DMA_NUM_CHANNELS); the old code pasted the PXA27x
     * constant (32 channels) here. */
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}
||||
/* Predicate for VMSTATE_UNUSED_TEST: true only when loading a version 0
 * migration stream, which carried 4 extra bytes that are skipped. */
static bool is_version_0(void *opaque, int version_id)
{
    return version_id == 0;
}

/* Migration format of a single DMA channel. */
static VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

/* Migration format of the whole controller.  Version 0 streams are
 * still accepted; their leading 4 unused bytes are discarded via the
 * is_version_0 test above. */
static VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

/* "channels" must be configured by the board code; the -1 default makes
 * an unconfigured device fail pxa2xx_dma_init(). */
static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};
||||
/* QOM class initialisation for the "pxa2xx-dma" sysbus device. */
static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    sbc->init = pxa2xx_dma_init;
    dc->desc = "PXA2xx DMA controller";
    dc->vmsd = &vmstate_pxa2xx_dma;
    dc->props = pxa2xx_dma_properties;
}
||||
/* QOM registration of the "pxa2xx-dma" sysbus device type. */
static const TypeInfo pxa2xx_dma_info = {
    .name = "pxa2xx-dma",
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PXA2xxDMAState),
    .class_init = pxa2xx_dma_class_init,
};

static void pxa2xx_dma_register_types(void)
{
    type_register_static(&pxa2xx_dma_info);
}

type_init(pxa2xx_dma_register_types)
366
hw/dma/soc_dma.c
Normal file
366
hw/dma/soc_dma.c
Normal file
|
@ -0,0 +1,366 @@
|
|||
/*
|
||||
* On-chip DMA controller framework.
|
||||
*
|
||||
* Copyright (C) 2008 Nokia Corporation
|
||||
* Written by Andrzej Zaborowski <andrew@openedhand.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; either version 2 or
|
||||
* (at your option) version 3 of the License.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/timer.h"
|
||||
#include "hw/arm/soc_dma.h"
|
||||
|
||||
static void transfer_mem2mem(struct soc_dma_ch_s *ch)
|
||||
{
|
||||
memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
|
||||
ch->paddr[0] += ch->bytes;
|
||||
ch->paddr[1] += ch->bytes;
|
||||
}
|
||||
|
||||
static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
|
||||
{
|
||||
ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
|
||||
ch->paddr[0] += ch->bytes;
|
||||
}
|
||||
|
||||
static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
|
||||
{
|
||||
ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
|
||||
ch->paddr[1] += ch->bytes;
|
||||
}
|
||||
|
||||
/* This is further optimisable but isn't very important because often
|
||||
* DMA peripherals forbid this kind of transfers and even when they don't,
|
||||
* oprating systems may not need to use them. */
|
||||
static void *fifo_buf;
|
||||
static int fifo_size;
|
||||
static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
|
||||
{
|
||||
if (ch->bytes > fifo_size)
|
||||
fifo_buf = g_realloc(fifo_buf, fifo_size = ch->bytes);
|
||||
|
||||
/* Implement as transfer_fifo2linear + transfer_linear2fifo. */
|
||||
ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
|
||||
ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
|
||||
}
|
||||
|
||||
/* Concrete controller state behind the public struct soc_dma_s.
 * The soc member must stay first: external pointers are cast back and
 * forth between the two types. */
struct dma_s {
    struct soc_dma_s soc;       /* public part, must be the first member */
    int chnum;                  /* number of channels in ch[] */
    uint64_t ch_enable_mask;    /* one bit per currently enabled channel */
    int64_t channel_freq;       /* per-channel byte rate (soc.freq / enabled) */
    int enabled_count;          /* number of enabled channels */

    /* Sorted (by addr) table of regions DMA may target; searched by
     * soc_dma_lookup() and grown by soc_dma_port_add_{fifo,mem}(). */
    struct memmap_entry_s {
        enum soc_dma_port_type type;
        hwaddr addr;            /* region start (or FIFO register) address */
        union {
            struct {
                void *opaque;   /* passed back to fn */
                soc_dma_io_t fn; /* FIFO transfer callback */
                int out;        /* direction flag used to match the port */
            } fifo;
            struct {
                void *base;     /* host pointer backing the region */
                size_t size;    /* region length in bytes */
            } mem;
        } u;
    } *memmap;
    int memmap_size;            /* number of entries in memmap */

    struct soc_dma_ch_s ch[0];  /* chnum channel states, allocated inline */
};
|
||||
static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
|
||||
{
|
||||
int64_t now = qemu_get_clock_ns(vm_clock);
|
||||
struct dma_s *dma = (struct dma_s *) ch->dma;
|
||||
|
||||
qemu_mod_timer(ch->timer, now + delay_bytes / dma->channel_freq);
|
||||
}
|
||||
|
||||
static void soc_dma_ch_run(void *opaque)
|
||||
{
|
||||
struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;
|
||||
|
||||
ch->running = 1;
|
||||
ch->dma->setup_fn(ch);
|
||||
ch->transfer_fn(ch);
|
||||
ch->running = 0;
|
||||
|
||||
if (ch->enable)
|
||||
soc_dma_ch_schedule(ch, ch->bytes);
|
||||
ch->bytes = 0;
|
||||
}
|
||||
|
||||
/* Binary descent over the sorted memmap table: returns the entry with
 * the greatest start address <= @addr (or the first entry when @addr
 * precedes everything).  Never returns NULL, but the caller must check
 * that the entry actually covers @addr. */
static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
                hwaddr addr)
{
    struct memmap_entry_s *lo;
    int hi;

    lo = dma->memmap;
    hi = dma->memmap_size;

    while (hi > 1) {
        hi /= 2;
        if (lo[hi].addr <= addr)
            lo += hi;
    }

    return lo;
}
||||
static inline enum soc_dma_port_type soc_dma_ch_update_type(
|
||||
struct soc_dma_ch_s *ch, int port)
|
||||
{
|
||||
struct dma_s *dma = (struct dma_s *) ch->dma;
|
||||
struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);
|
||||
|
||||
if (entry->type == soc_dma_port_fifo) {
|
||||
while (entry < dma->memmap + dma->memmap_size &&
|
||||
entry->u.fifo.out != port)
|
||||
entry ++;
|
||||
if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
|
||||
return soc_dma_port_other;
|
||||
|
||||
if (ch->type[port] != soc_dma_access_const)
|
||||
return soc_dma_port_other;
|
||||
|
||||
ch->io_fn[port] = entry->u.fifo.fn;
|
||||
ch->io_opaque[port] = entry->u.fifo.opaque;
|
||||
return soc_dma_port_fifo;
|
||||
} else if (entry->type == soc_dma_port_mem) {
|
||||
if (entry->addr > ch->vaddr[port] ||
|
||||
entry->addr + entry->u.mem.size <= ch->vaddr[port])
|
||||
return soc_dma_port_other;
|
||||
|
||||
/* TODO: support constant memory address for source port as used for
|
||||
* drawing solid rectangles by PalmOS(R). */
|
||||
if (ch->type[port] != soc_dma_access_const)
|
||||
return soc_dma_port_other;
|
||||
|
||||
ch->paddr[port] = (uint8_t *) entry->u.mem.base +
|
||||
(ch->vaddr[port] - entry->addr);
|
||||
/* TODO: save bytes left to the end of the mapping somewhere so we
|
||||
* can check we're not reading beyond it. */
|
||||
return soc_dma_port_mem;
|
||||
} else
|
||||
return soc_dma_port_other;
|
||||
}
|
||||
|
||||
void soc_dma_ch_update(struct soc_dma_ch_s *ch)
|
||||
{
|
||||
enum soc_dma_port_type src, dst;
|
||||
|
||||
src = soc_dma_ch_update_type(ch, 0);
|
||||
if (src == soc_dma_port_other) {
|
||||
ch->update = 0;
|
||||
ch->transfer_fn = ch->dma->transfer_fn;
|
||||
return;
|
||||
}
|
||||
dst = soc_dma_ch_update_type(ch, 1);
|
||||
|
||||
/* TODO: use src and dst as array indices. */
|
||||
if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
|
||||
ch->transfer_fn = transfer_mem2mem;
|
||||
else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
|
||||
ch->transfer_fn = transfer_mem2fifo;
|
||||
else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
|
||||
ch->transfer_fn = transfer_fifo2mem;
|
||||
else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
|
||||
ch->transfer_fn = transfer_fifo2fifo;
|
||||
else
|
||||
ch->transfer_fn = ch->dma->transfer_fn;
|
||||
|
||||
ch->update = (dst != soc_dma_port_other);
|
||||
}
|
||||
|
||||
/* Split the controller's total bandwidth evenly over the currently
 * enabled channels. */
static void soc_dma_ch_freq_update(struct dma_s *s)
{
    if (s->enabled_count)
        /* We completely ignore channel priorities and stuff */
        s->channel_freq = s->soc.freq / s->enabled_count;
    else {
        /* TODO: Signal that we want to disable the functional clock and let
         * the platform code decide what to do with it, i.e. check that
         * auto-idle is enabled in the clock controller and if we are stopping
         * the clock, do the same with any parent clocks that had only one
         * user keeping them on and auto-idle enabled.  */
    }
}
||||
void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
|
||||
{
|
||||
struct dma_s *dma = (struct dma_s *) ch->dma;
|
||||
|
||||
dma->enabled_count += level - ch->enable;
|
||||
|
||||
if (level)
|
||||
dma->ch_enable_mask |= 1 << ch->num;
|
||||
else
|
||||
dma->ch_enable_mask &= ~(1 << ch->num);
|
||||
|
||||
if (level != ch->enable) {
|
||||
soc_dma_ch_freq_update(dma);
|
||||
ch->enable = level;
|
||||
|
||||
if (!ch->enable)
|
||||
qemu_del_timer(ch->timer);
|
||||
else if (!ch->running)
|
||||
soc_dma_ch_run(ch);
|
||||
else
|
||||
soc_dma_ch_schedule(ch, 1);
|
||||
}
|
||||
}
|
||||
|
||||
void soc_dma_reset(struct soc_dma_s *soc)
|
||||
{
|
||||
struct dma_s *s = (struct dma_s *) soc;
|
||||
|
||||
s->soc.drqbmp = 0;
|
||||
s->ch_enable_mask = 0;
|
||||
s->enabled_count = 0;
|
||||
soc_dma_ch_freq_update(s);
|
||||
}
|
||||
|
||||
/* TODO: take a functional-clock argument */
|
||||
struct soc_dma_s *soc_dma_init(int n)
|
||||
{
|
||||
int i;
|
||||
struct dma_s *s = g_malloc0(sizeof(*s) + n * sizeof(*s->ch));
|
||||
|
||||
s->chnum = n;
|
||||
s->soc.ch = s->ch;
|
||||
for (i = 0; i < n; i ++) {
|
||||
s->ch[i].dma = &s->soc;
|
||||
s->ch[i].num = i;
|
||||
s->ch[i].timer = qemu_new_timer_ns(vm_clock, soc_dma_ch_run, &s->ch[i]);
|
||||
}
|
||||
|
||||
soc_dma_reset(&s->soc);
|
||||
fifo_size = 0;
|
||||
|
||||
return &s->soc;
|
||||
}
|
||||
|
||||
/* Register a FIFO port at @virt_base in the DMA address map, keeping
 * dma->memmap sorted by address.  @out selects the direction this FIFO
 * serves.  Aborts QEMU on a collision with an existing RAM region or
 * with another FIFO of the same direction at the same address. */
void soc_dma_port_add_fifo(struct soc_dma_s *soc, hwaddr virt_base,
                soc_dma_io_t fn, void *opaque, int out)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    /* Grow the table by one slot, then find the insertion point. */
    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if (entry->addr <= virt_base &&
                    entry->addr + entry->u.mem.size > virt_base) {
                fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
                                " collides with RAM region at " TARGET_FMT_lx
                                "-" TARGET_FMT_lx "\n", __FUNCTION__,
                                (target_ulong) virt_base,
                                (target_ulong) entry->addr, (target_ulong)
                                (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else
            /* Walk past FIFOs with lower-or-equal addresses, checking
             * each for a same-address, same-direction duplicate. */
            while (entry < dma->memmap + dma->memmap_size &&
                    entry->addr <= virt_base) {
                if (entry->addr == virt_base && entry->u.fifo.out == out) {
                    fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
                                    " collides FIFO at " TARGET_FMT_lx "\n",
                                    __FUNCTION__, (target_ulong) virt_base,
                                    (target_ulong) entry->addr);
                    exit(-1);
                }

                entry ++;
            }

        /* Shift the tail up by one slot to open the insertion point. */
        memmove(entry + 1, entry,
                        (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                        (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_fifo;
    entry->u.fifo.fn = fn;
    entry->u.fifo.opaque = opaque;
    entry->u.fifo.out = out;
}
||||
|
||||
/* Register a linear host-memory region (@phys_base, @size) reachable by
 * DMA at @virt_base, keeping dma->memmap sorted by address.  Aborts
 * QEMU when the new range overlaps an existing RAM region or FIFO. */
void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
                hwaddr virt_base, size_t size)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    /* Grow the table by one slot, then find the insertion point. */
    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            /* Overlap check against the neighbouring RAM entry. */
            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
                            (entry->addr <= virt_base &&
                             entry->addr + entry->u.mem.size > virt_base)) {
                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
                                " collides with RAM region at " TARGET_FMT_lx
                                "-" TARGET_FMT_lx "\n", __FUNCTION__,
                                (target_ulong) virt_base,
                                (target_ulong) (virt_base + size),
                                (target_ulong) entry->addr, (target_ulong)
                                (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else {
            /* Overlap check against a FIFO inside the new range. */
            if (entry->addr >= virt_base &&
                            entry->addr < virt_base + size) {
                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
                                " collides with FIFO at " TARGET_FMT_lx
                                "\n", __FUNCTION__,
                                (target_ulong) virt_base,
                                (target_ulong) (virt_base + size),
                                (target_ulong) entry->addr);
                exit(-1);
            }

            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base)
                entry ++;
        }

        /* Shift the tail up by one slot to open the insertion point. */
        memmove(entry + 1, entry,
                        (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                        (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_mem;
    entry->u.mem.base = phys_base;
    entry->u.mem.size = size;
}
|
||||
/* TODO: port removal for ports like PCMCIA memory */
|
315
hw/dma/sparc32_dma.c
Normal file
315
hw/dma/sparc32_dma.c
Normal file
|
@ -0,0 +1,315 @@
|
|||
/*
|
||||
* QEMU Sparc32 DMA controller emulation
|
||||
*
|
||||
* Copyright (c) 2006 Fabrice Bellard
|
||||
*
|
||||
* Modifications:
|
||||
* 2010-Feb-14 Artyom Tarasenko : reworked irq generation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "hw/hw.h"
|
||||
#include "hw/sparc/sparc32_dma.h"
|
||||
#include "hw/sparc/sun4m.h"
|
||||
#include "hw/sysbus.h"
|
||||
#include "trace.h"
|
||||
|
||||
/*
|
||||
* This is the DMA controller part of chip STP2000 (Master I/O), also
|
||||
* produced as NCR89C100. See
|
||||
* http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
|
||||
* and
|
||||
* http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/DMA2.txt
|
||||
*/
|
||||
|
||||
#define DMA_REGS 4
#define DMA_SIZE (4 * sizeof(uint32_t))
/* We need the mask, because one instance of the device is not page
   aligned (ledma, start address 0x0010) */
#define DMA_MASK (DMA_SIZE - 1)
/* OBP says 0x20 bytes for ledma, the extras are aliased to espdma */
#define DMA_ETH_SIZE (8 * sizeof(uint32_t))
#define DMA_MAX_REG_OFFSET (2 * DMA_SIZE - 1)

/* Bits of the CSR (dmaregs[0]). */
#define DMA_VER 0xa0000000          /* hardwired device-version field */
#define DMA_INTR 1                  /* interrupt pending */
#define DMA_INTREN 0x10             /* interrupt enable */
#define DMA_WRITE_MEM 0x100
#define DMA_EN 0x200                /* DMA enable */
#define DMA_LOADED 0x04000000       /* set after a write to dmaregs[1] */
#define DMA_DRAIN_FIFO 0x40
#define DMA_RESET 0x80              /* pulses the reset GPIO output */

/* XXX SCSI and ethernet should have different read-only bit masks */
#define DMA_CSR_RO_MASK 0xfe000007

typedef struct DMAState DMAState;

struct DMAState {
    SysBusDevice busdev;
    MemoryRegion iomem;             /* register window (size per is_ledma) */
    uint32_t dmaregs[DMA_REGS];     /* [0]=CSR, [1]=address; [3] is ORed
                                       into Lance addresses */
    qemu_irq irq;                   /* interrupt line toward the CPU */
    void *iommu;                    /* opaque handle for sparc_iommu_memory_* */
    qemu_irq gpio[2];               /* outputs, see the enum below */
    uint32_t is_ledma;              /* nonzero for the Lance (ethernet)
                                       instance: wider window, aliasing */
};

enum {
    GPIO_RESET = 0,
    GPIO_DMA,
};
|
||||
/* Note: on sparc, the lance 16 bit bus is swapped */
|
||||
void ledma_memory_read(void *opaque, hwaddr addr,
|
||||
uint8_t *buf, int len, int do_bswap)
|
||||
{
|
||||
DMAState *s = opaque;
|
||||
int i;
|
||||
|
||||
addr |= s->dmaregs[3];
|
||||
trace_ledma_memory_read(addr);
|
||||
if (do_bswap) {
|
||||
sparc_iommu_memory_read(s->iommu, addr, buf, len);
|
||||
} else {
|
||||
addr &= ~1;
|
||||
len &= ~1;
|
||||
sparc_iommu_memory_read(s->iommu, addr, buf, len);
|
||||
for(i = 0; i < len; i += 2) {
|
||||
bswap16s((uint16_t *)(buf + i));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* DMA write on behalf of the Lance.  Without do_bswap the data is
 * byte-swapped per 16-bit unit through a 64-byte bounce buffer in
 * chunks, since the source buffer must not be modified. */
void ledma_memory_write(void *opaque, hwaddr addr,
                        uint8_t *buf, int len, int do_bswap)
{
    DMAState *s = opaque;
    int l, i;
    uint16_t tmp_buf[32];

    /* The base register (dmaregs[3]) supplies the upper address bits. */
    addr |= s->dmaregs[3];
    trace_ledma_memory_write(addr);
    if (do_bswap) {
        sparc_iommu_memory_write(s->iommu, addr, buf, len);
    } else {
        /* Force halfword alignment for the swapped path. */
        addr &= ~1;
        len &= ~1;
        while (len > 0) {
            l = len;
            if (l > sizeof(tmp_buf))
                l = sizeof(tmp_buf);
            for(i = 0; i < l; i += 2) {
                tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
            }
            sparc_iommu_memory_write(s->iommu, addr, (uint8_t *)tmp_buf, l);
            len -= l;
            buf += l;
            addr += l;
        }
    }
}
||||
|
||||
static void dma_set_irq(void *opaque, int irq, int level)
|
||||
{
|
||||
DMAState *s = opaque;
|
||||
if (level) {
|
||||
s->dmaregs[0] |= DMA_INTR;
|
||||
if (s->dmaregs[0] & DMA_INTREN) {
|
||||
trace_sparc32_dma_set_irq_raise();
|
||||
qemu_irq_raise(s->irq);
|
||||
}
|
||||
} else {
|
||||
if (s->dmaregs[0] & DMA_INTR) {
|
||||
s->dmaregs[0] &= ~DMA_INTR;
|
||||
if (s->dmaregs[0] & DMA_INTREN) {
|
||||
trace_sparc32_dma_set_irq_lower();
|
||||
qemu_irq_lower(s->irq);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void espdma_memory_read(void *opaque, uint8_t *buf, int len)
|
||||
{
|
||||
DMAState *s = opaque;
|
||||
|
||||
trace_espdma_memory_read(s->dmaregs[1]);
|
||||
sparc_iommu_memory_read(s->iommu, s->dmaregs[1], buf, len);
|
||||
s->dmaregs[1] += len;
|
||||
}
|
||||
|
||||
void espdma_memory_write(void *opaque, uint8_t *buf, int len)
|
||||
{
|
||||
DMAState *s = opaque;
|
||||
|
||||
trace_espdma_memory_write(s->dmaregs[1]);
|
||||
sparc_iommu_memory_write(s->iommu, s->dmaregs[1], buf, len);
|
||||
s->dmaregs[1] += len;
|
||||
}
|
||||
|
||||
/* MMIO register read.  For the Lance instance, offsets beyond the four
 * real registers are an undocumented alias toward espdma which we
 * cannot reach from here, so they read as zero. */
static uint64_t dma_mem_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    DMAState *s = opaque;
    uint32_t reg;

    if (s->is_ledma && (addr > DMA_MAX_REG_OFFSET)) {
        /* aliased to espdma, but we can't get there from here */
        /* buggy driver if using undocumented behavior, just return 0 */
        trace_sparc32_dma_mem_readl(addr, 0);
        return 0;
    }

    reg = (addr & DMA_MASK) >> 2;
    trace_sparc32_dma_mem_readl(addr, s->dmaregs[reg]);
    return s->dmaregs[reg];
}
||||
|
||||
/* MMIO register write.  Writes to the CSR (register 0) may raise or
 * lower the CPU interrupt, pulse the reset output, toggle the DMA
 * enable output and update the writable CSR bits; `val` is modified
 * in place along the way, so statement order matters here. */
static void dma_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    DMAState *s = opaque;
    uint32_t saddr;

    if (s->is_ledma && (addr > DMA_MAX_REG_OFFSET)) {
        /* aliased to espdma, but we can't get there from here */
        trace_sparc32_dma_mem_writel(addr, 0, val);
        return;
    }
    saddr = (addr & DMA_MASK) >> 2;
    trace_sparc32_dma_mem_writel(addr, s->dmaregs[saddr], val);
    switch (saddr) {
    case 0:
        /* Follow the new INTREN setting with the latched INTR state. */
        if (val & DMA_INTREN) {
            if (s->dmaregs[0] & DMA_INTR) {
                trace_sparc32_dma_set_irq_raise();
                qemu_irq_raise(s->irq);
            }
        } else {
            if (s->dmaregs[0] & (DMA_INTR | DMA_INTREN)) {
                trace_sparc32_dma_set_irq_lower();
                qemu_irq_lower(s->irq);
            }
        }
        if (val & DMA_RESET) {
            /* Pulse the reset line of the attached device. */
            qemu_irq_raise(s->gpio[GPIO_RESET]);
            qemu_irq_lower(s->gpio[GPIO_RESET]);
        } else if (val & DMA_DRAIN_FIFO) {
            val &= ~DMA_DRAIN_FIFO;
        } else if (val == 0)
            val = DMA_DRAIN_FIFO;

        /* Edge-detect the DMA enable bit and mirror it on the GPIO. */
        if (val & DMA_EN && !(s->dmaregs[0] & DMA_EN)) {
            trace_sparc32_dma_enable_raise();
            qemu_irq_raise(s->gpio[GPIO_DMA]);
        } else if (!(val & DMA_EN) && !!(s->dmaregs[0] & DMA_EN)) {
            trace_sparc32_dma_enable_lower();
            qemu_irq_lower(s->gpio[GPIO_DMA]);
        }

        /* Merge: read-only bits keep their old value, the version field
         * always reads back as DMA_VER. */
        val &= ~DMA_CSR_RO_MASK;
        val |= DMA_VER;
        s->dmaregs[0] = (s->dmaregs[0] & DMA_CSR_RO_MASK) | val;
        break;
    case 1:
        s->dmaregs[0] |= DMA_LOADED;
        /* fall through */
    default:
        s->dmaregs[saddr] = val;
        break;
    }
}
||||
|
||||
/* Register window callbacks; only 32-bit accesses are accepted (the
 * memory core rejects other sizes before they reach the handlers). */
static const MemoryRegionOps dma_mem_ops = {
    .read = dma_mem_read,
    .write = dma_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
||||
|
||||
/* Device reset: every register clears to zero except the CSR, which
 * always exposes the hardwired version field. */
static void dma_reset(DeviceState *d)
{
    DMAState *s = container_of(d, DMAState, busdev.qdev);
    int i;

    for (i = 0; i < DMA_REGS; i++) {
        s->dmaregs[i] = 0;
    }
    s->dmaregs[0] = DMA_VER;
}
||||
|
||||
/* Migration format: only the four guest-visible registers are saved. */
static const VMStateDescription vmstate_dma = {
    .name ="sparc32_dma",
    .version_id = 2,
    .minimum_version_id = 2,
    .minimum_version_id_old = 2,
    .fields = (VMStateField []) {
        VMSTATE_UINT32_ARRAY(dmaregs, DMAState, DMA_REGS),
        VMSTATE_END_OF_LIST()
    }
};
||||
|
||||
/* sysbus init: pick the window size from the is_ledma property, map the
 * register MMIO region and wire the IRQ input plus the two outputs
 * (device reset, DMA enable). */
static int sparc32_dma_init1(SysBusDevice *dev)
{
    DMAState *s = FROM_SYSBUS(DMAState, dev);
    int reg_size = s->is_ledma ? DMA_ETH_SIZE : DMA_SIZE;

    sysbus_init_irq(dev, &s->irq);

    memory_region_init_io(&s->iomem, &dma_mem_ops, s, "dma", reg_size);
    sysbus_init_mmio(dev, &s->iomem);

    qdev_init_gpio_in(&dev->qdev, dma_set_irq, 1);
    qdev_init_gpio_out(&dev->qdev, s->gpio, 2);

    return 0;
}
||||
|
||||
static Property sparc32_dma_properties[] = {
|
||||
DEFINE_PROP_PTR("iommu_opaque", DMAState, iommu),
|
||||
DEFINE_PROP_UINT32("is_ledma", DMAState, is_ledma, 0),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
/* QOM class init: install reset, migration and property hooks. */
static void sparc32_dma_class_init(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    sbc->init = sparc32_dma_init1;
    dc->reset = dma_reset;
    dc->vmsd = &vmstate_dma;
    dc->props = sparc32_dma_properties;
}
|
||||
|
||||
static const TypeInfo sparc32_dma_info = {
|
||||
.name = "sparc32_dma",
|
||||
.parent = TYPE_SYS_BUS_DEVICE,
|
||||
.instance_size = sizeof(DMAState),
|
||||
.class_init = sparc32_dma_class_init,
|
||||
};
|
||||
|
||||
static void sparc32_dma_register_types(void)
|
||||
{
|
||||
type_register_static(&sparc32_dma_info);
|
||||
}
|
||||
|
||||
type_init(sparc32_dma_register_types)
|
387
hw/dma/sun4m_iommu.c
Normal file
387
hw/dma/sun4m_iommu.c
Normal file
|
@ -0,0 +1,387 @@
|
|||
/*
|
||||
* QEMU Sun4m iommu emulation
|
||||
*
|
||||
* Copyright (c) 2003-2005 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "hw/sparc/sun4m.h"
|
||||
#include "hw/sysbus.h"
|
||||
#include "trace.h"
|
||||
|
||||
/*
 * I/O MMU used by Sun4m systems
 *
 * Chipset docs:
 * "Sun-4M System Architecture (revision 2.0) by Chuck Narad", 950-1373-01,
 * http://mediacast.sun.com/users/Barton808/media/Sun4M_SystemArchitecture_edited2.pdf
 */

/* Register file covers four 4K pages of 32-bit registers. */
#define IOMMU_NREGS         (4*4096/4)

/* Control register */
#define IOMMU_CTRL          (0x0000 >> 2)
#define IOMMU_CTRL_IMPL     0xf0000000 /* Implementation */
#define IOMMU_CTRL_VERS     0x0f000000 /* Version */
#define IOMMU_CTRL_RNGE     0x0000001c /* Mapping RANGE */
#define IOMMU_RNGE_16MB     0x00000000 /* 0xff000000 -> 0xffffffff */
#define IOMMU_RNGE_32MB     0x00000004 /* 0xfe000000 -> 0xffffffff */
#define IOMMU_RNGE_64MB     0x00000008 /* 0xfc000000 -> 0xffffffff */
#define IOMMU_RNGE_128MB    0x0000000c /* 0xf8000000 -> 0xffffffff */
#define IOMMU_RNGE_256MB    0x00000010 /* 0xf0000000 -> 0xffffffff */
#define IOMMU_RNGE_512MB    0x00000014 /* 0xe0000000 -> 0xffffffff */
#define IOMMU_RNGE_1GB      0x00000018 /* 0xc0000000 -> 0xffffffff */
#define IOMMU_RNGE_2GB      0x0000001c /* 0x80000000 -> 0xffffffff */
#define IOMMU_CTRL_ENAB     0x00000001 /* IOMMU Enable */
#define IOMMU_CTRL_MASK     0x0000001d /* guest-writable control bits */

/* Page-table base address register (physical address >> 4) */
#define IOMMU_BASE          (0x0004 >> 2)
#define IOMMU_BASE_MASK     0x07fffc00

/* Flush all TLB entries */
#define IOMMU_TLBFLUSH      (0x0014 >> 2)
#define IOMMU_TLBFLUSH_MASK 0xffffffff

/* Flush the TLB entry for one page */
#define IOMMU_PGFLUSH       (0x0018 >> 2)
#define IOMMU_PGFLUSH_MASK  0xffffffff

/* Asynchronous Fault Status Register */
#define IOMMU_AFSR          (0x1000 >> 2)
#define IOMMU_AFSR_ERR      0x80000000 /* LE, TO, or BE asserted */
#define IOMMU_AFSR_LE       0x40000000 /* SBUS reports error after
                                          transaction */
#define IOMMU_AFSR_TO       0x20000000 /* Write access took more than
                                          12.8 us. */
#define IOMMU_AFSR_BE       0x10000000 /* Write access received error
                                          acknowledge */
#define IOMMU_AFSR_SIZE     0x0e000000 /* Size of transaction causing error */
#define IOMMU_AFSR_S        0x01000000 /* Sparc was in supervisor mode */
#define IOMMU_AFSR_RESV     0x00800000 /* Reserved, forced to 0x8 by
                                          hardware */
#define IOMMU_AFSR_ME       0x00080000 /* Multiple errors occurred */
#define IOMMU_AFSR_RD       0x00040000 /* A read operation was in progress */
#define IOMMU_AFSR_FAV      0x00020000 /* IOMMU afar has valid contents */
#define IOMMU_AFSR_MASK     0xff0fffff

/* Asynchronous Fault Address Register */
#define IOMMU_AFAR          (0x1004 >> 2)

#define IOMMU_AER           (0x1008 >> 2) /* Arbiter Enable Register */
#define IOMMU_AER_EN_P0_ARB 0x00000001    /* MBus master 0x8 (Always 1) */
#define IOMMU_AER_EN_P1_ARB 0x00000002    /* MBus master 0x9 */
#define IOMMU_AER_EN_P2_ARB 0x00000004    /* MBus master 0xa */
#define IOMMU_AER_EN_P3_ARB 0x00000008    /* MBus master 0xb */
#define IOMMU_AER_EN_0      0x00010000    /* SBus slot 0 */
#define IOMMU_AER_EN_1      0x00020000    /* SBus slot 1 */
#define IOMMU_AER_EN_2      0x00040000    /* SBus slot 2 */
#define IOMMU_AER_EN_3      0x00080000    /* SBus slot 3 */
#define IOMMU_AER_EN_F      0x00100000    /* SBus on-board */
#define IOMMU_AER_SBW       0x80000000    /* S-to-M asynchronous writes */
#define IOMMU_AER_MASK      0x801f000f

#define IOMMU_SBCFG0        (0x1010 >> 2) /* SBUS configuration per-slot */
#define IOMMU_SBCFG1        (0x1014 >> 2) /* SBUS configuration per-slot */
#define IOMMU_SBCFG2        (0x1018 >> 2) /* SBUS configuration per-slot */
#define IOMMU_SBCFG3        (0x101c >> 2) /* SBUS configuration per-slot */
#define IOMMU_SBCFG_SAB30   0x00010000 /* Phys-address bit 30 when
                                          bypass enabled */
#define IOMMU_SBCFG_BA16    0x00000004 /* Slave supports 16 byte bursts */
#define IOMMU_SBCFG_BA8     0x00000002 /* Slave supports 8 byte bursts */
#define IOMMU_SBCFG_BYPASS  0x00000001 /* Bypass IOMMU, treat all addresses
                                          produced by this device as pure
                                          physical. */
#define IOMMU_SBCFG_MASK    0x00010003

#define IOMMU_ARBEN         (0x2000 >> 2) /* SBUS arbitration enable */
#define IOMMU_ARBEN_MASK    0x001f0000
#define IOMMU_MID           0x00000008

#define IOMMU_MASK_ID       (0x3018 >> 2) /* Mask ID */
#define IOMMU_MASK_ID_MASK  0x00ffffff

#define IOMMU_MSII_MASK     0x26000000 /* microSPARC II mask number */
#define IOMMU_TS_MASK       0x23000000 /* turboSPARC mask number */

/* The format of an iopte in the page tables */
#define IOPTE_PAGE          0xffffff00 /* Physical page number (PA[35:12]) */
#define IOPTE_CACHE         0x00000080 /* Cached (in vme IOCACHE or
                                          Viking/MXCC) */
#define IOPTE_WRITE         0x00000004 /* Writable */
#define IOPTE_VALID         0x00000002 /* IOPTE is valid */
#define IOPTE_WAZ           0x00000001 /* Write as zeros */

/* DVMA pages are 4K, matching the CPU MMU page size. */
#define IOMMU_PAGE_SHIFT    12
#define IOMMU_PAGE_SIZE     (1 << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK     ~(IOMMU_PAGE_SIZE - 1)
|
||||
|
||||
/* Per-device state of the sun4m I/O MMU. */
typedef struct IOMMUState {
    SysBusDevice busdev;         /* parent SysBus device */
    MemoryRegion iomem;          /* MMIO window over the register file */
    uint32_t regs[IOMMU_NREGS];  /* raw register file */
    hwaddr iostart;              /* start of the DVMA-translated range,
                                    cached when IOMMU_CTRL is written */
    qemu_irq irq;                /* fault interrupt line */
    uint32_t version;            /* IMPL/VERS value shown in IOMMU_CTRL */
} IOMMUState;
|
||||
|
||||
/*
 * Register read handler.  All registers read back their stored value;
 * reading the fault address/status registers additionally acknowledges
 * (lowers) the fault interrupt.
 */
static uint64_t iommu_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    IOMMUState *s = opaque;
    hwaddr reg = addr >> 2;
    uint32_t ret = s->regs[reg];

    if (reg == IOMMU_AFAR || reg == IOMMU_AFSR) {
        qemu_irq_lower(s->irq);
    }
    trace_sun4m_iommu_mem_readl(reg, ret);
    return ret;
}
|
||||
|
||||
static void iommu_mem_write(void *opaque, hwaddr addr,
|
||||
uint64_t val, unsigned size)
|
||||
{
|
||||
IOMMUState *s = opaque;
|
||||
hwaddr saddr;
|
||||
|
||||
saddr = addr >> 2;
|
||||
trace_sun4m_iommu_mem_writel(saddr, val);
|
||||
switch (saddr) {
|
||||
case IOMMU_CTRL:
|
||||
switch (val & IOMMU_CTRL_RNGE) {
|
||||
case IOMMU_RNGE_16MB:
|
||||
s->iostart = 0xffffffffff000000ULL;
|
||||
break;
|
||||
case IOMMU_RNGE_32MB:
|
||||
s->iostart = 0xfffffffffe000000ULL;
|
||||
break;
|
||||
case IOMMU_RNGE_64MB:
|
||||
s->iostart = 0xfffffffffc000000ULL;
|
||||
break;
|
||||
case IOMMU_RNGE_128MB:
|
||||
s->iostart = 0xfffffffff8000000ULL;
|
||||
break;
|
||||
case IOMMU_RNGE_256MB:
|
||||
s->iostart = 0xfffffffff0000000ULL;
|
||||
break;
|
||||
case IOMMU_RNGE_512MB:
|
||||
s->iostart = 0xffffffffe0000000ULL;
|
||||
break;
|
||||
case IOMMU_RNGE_1GB:
|
||||
s->iostart = 0xffffffffc0000000ULL;
|
||||
break;
|
||||
default:
|
||||
case IOMMU_RNGE_2GB:
|
||||
s->iostart = 0xffffffff80000000ULL;
|
||||
break;
|
||||
}
|
||||
trace_sun4m_iommu_mem_writel_ctrl(s->iostart);
|
||||
s->regs[saddr] = ((val & IOMMU_CTRL_MASK) | s->version);
|
||||
break;
|
||||
case IOMMU_BASE:
|
||||
s->regs[saddr] = val & IOMMU_BASE_MASK;
|
||||
break;
|
||||
case IOMMU_TLBFLUSH:
|
||||
trace_sun4m_iommu_mem_writel_tlbflush(val);
|
||||
s->regs[saddr] = val & IOMMU_TLBFLUSH_MASK;
|
||||
break;
|
||||
case IOMMU_PGFLUSH:
|
||||
trace_sun4m_iommu_mem_writel_pgflush(val);
|
||||
s->regs[saddr] = val & IOMMU_PGFLUSH_MASK;
|
||||
break;
|
||||
case IOMMU_AFAR:
|
||||
s->regs[saddr] = val;
|
||||
qemu_irq_lower(s->irq);
|
||||
break;
|
||||
case IOMMU_AER:
|
||||
s->regs[saddr] = (val & IOMMU_AER_MASK) | IOMMU_AER_EN_P0_ARB;
|
||||
break;
|
||||
case IOMMU_AFSR:
|
||||
s->regs[saddr] = (val & IOMMU_AFSR_MASK) | IOMMU_AFSR_RESV;
|
||||
qemu_irq_lower(s->irq);
|
||||
break;
|
||||
case IOMMU_SBCFG0:
|
||||
case IOMMU_SBCFG1:
|
||||
case IOMMU_SBCFG2:
|
||||
case IOMMU_SBCFG3:
|
||||
s->regs[saddr] = val & IOMMU_SBCFG_MASK;
|
||||
break;
|
||||
case IOMMU_ARBEN:
|
||||
// XXX implement SBus probing: fault when reading unmapped
|
||||
// addresses, fault cause and address stored to MMU/IOMMU
|
||||
s->regs[saddr] = (val & IOMMU_ARBEN_MASK) | IOMMU_MID;
|
||||
break;
|
||||
case IOMMU_MASK_ID:
|
||||
s->regs[saddr] |= val & IOMMU_MASK_ID_MASK;
|
||||
break;
|
||||
default:
|
||||
s->regs[saddr] = val;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static const MemoryRegionOps iommu_mem_ops = {
|
||||
.read = iommu_mem_read,
|
||||
.write = iommu_mem_write,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.valid = {
|
||||
.min_access_size = 4,
|
||||
.max_access_size = 4,
|
||||
},
|
||||
};
|
||||
|
||||
/*
 * Fetch the IOPTE covering @addr from the in-memory page table.
 * The table base is stored right-shifted by 4 in IOMMU_BASE; the page
 * index within the DVMA window selects a 4-byte big-endian entry.
 */
static uint32_t iommu_page_get_flags(IOMMUState *s, hwaddr addr)
{
    hwaddr pte_addr;
    uint32_t pte;

    pte_addr = s->regs[IOMMU_BASE] << 4;
    pte_addr += ((addr & ~s->iostart) >> (IOMMU_PAGE_SHIFT - 2)) & ~3;
    pte = ldl_be_phys(pte_addr);
    trace_sun4m_iommu_page_get_flags(addr, pte_addr, pte);
    return pte;
}
|
||||
|
||||
static hwaddr iommu_translate_pa(hwaddr addr,
|
||||
uint32_t pte)
|
||||
{
|
||||
hwaddr pa;
|
||||
|
||||
pa = ((pte & IOPTE_PAGE) << 4) + (addr & ~IOMMU_PAGE_MASK);
|
||||
trace_sun4m_iommu_translate_pa(addr, pa, pte);
|
||||
return pa;
|
||||
}
|
||||
|
||||
/*
 * Record a DVMA fault: latch the fault status and address registers and
 * raise the fault interrupt.
 */
static void iommu_bad_addr(IOMMUState *s, hwaddr addr,
                           int is_write)
{
    uint32_t afsr = IOMMU_AFSR_ERR | IOMMU_AFSR_LE | IOMMU_AFSR_RESV |
                    IOMMU_AFSR_FAV;

    trace_sun4m_iommu_bad_addr(addr);
    if (!is_write) {
        afsr |= IOMMU_AFSR_RD;
    }
    s->regs[IOMMU_AFSR] = afsr;
    s->regs[IOMMU_AFAR] = addr;
    qemu_irq_raise(s->irq);
}
|
||||
|
||||
void sparc_iommu_memory_rw(void *opaque, hwaddr addr,
|
||||
uint8_t *buf, int len, int is_write)
|
||||
{
|
||||
int l;
|
||||
uint32_t flags;
|
||||
hwaddr page, phys_addr;
|
||||
|
||||
while (len > 0) {
|
||||
page = addr & IOMMU_PAGE_MASK;
|
||||
l = (page + IOMMU_PAGE_SIZE) - addr;
|
||||
if (l > len)
|
||||
l = len;
|
||||
flags = iommu_page_get_flags(opaque, page);
|
||||
if (!(flags & IOPTE_VALID)) {
|
||||
iommu_bad_addr(opaque, page, is_write);
|
||||
return;
|
||||
}
|
||||
phys_addr = iommu_translate_pa(addr, flags);
|
||||
if (is_write) {
|
||||
if (!(flags & IOPTE_WRITE)) {
|
||||
iommu_bad_addr(opaque, page, is_write);
|
||||
return;
|
||||
}
|
||||
cpu_physical_memory_write(phys_addr, buf, l);
|
||||
} else {
|
||||
cpu_physical_memory_read(phys_addr, buf, l);
|
||||
}
|
||||
len -= l;
|
||||
buf += l;
|
||||
addr += l;
|
||||
}
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_iommu = {
|
||||
.name ="iommu",
|
||||
.version_id = 2,
|
||||
.minimum_version_id = 2,
|
||||
.minimum_version_id_old = 2,
|
||||
.fields = (VMStateField []) {
|
||||
VMSTATE_UINT32_ARRAY(regs, IOMMUState, IOMMU_NREGS),
|
||||
VMSTATE_UINT64(iostart, IOMMUState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
||||
/* Device reset: clear all registers, then seed the hardwired values. */
static void iommu_reset(DeviceState *d)
{
    IOMMUState *iommu = container_of(d, IOMMUState, busdev.qdev);

    memset(iommu->regs, 0, IOMMU_NREGS * 4);
    iommu->iostart = 0;
    iommu->regs[IOMMU_CTRL] = iommu->version;      /* IMPL/VERS bits */
    iommu->regs[IOMMU_ARBEN] = IOMMU_MID;
    iommu->regs[IOMMU_AFSR] = IOMMU_AFSR_RESV;     /* reserved bit reads 1 */
    iommu->regs[IOMMU_AER] = IOMMU_AER_EN_P0_ARB | IOMMU_AER_EN_P1_ARB;
    iommu->regs[IOMMU_MASK_ID] = IOMMU_TS_MASK;
}
|
||||
|
||||
/* SysBus init: set up the fault IRQ line and the MMIO register window. */
static int iommu_init1(SysBusDevice *dev)
{
    IOMMUState *s = FROM_SYSBUS(IOMMUState, dev);

    sysbus_init_irq(dev, &s->irq);
    memory_region_init_io(&s->iomem, &iommu_mem_ops, s, "iommu",
                          IOMMU_NREGS * sizeof(uint32_t));
    sysbus_init_mmio(dev, &s->iomem);
    return 0;
}
|
||||
|
||||
static Property iommu_properties[] = {
|
||||
DEFINE_PROP_HEX32("version", IOMMUState, version, 0),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
/* QOM class init: install reset, migration and property hooks. */
static void iommu_class_init(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    sbc->init = iommu_init1;
    dc->reset = iommu_reset;
    dc->vmsd = &vmstate_iommu;
    dc->props = iommu_properties;
}
|
||||
|
||||
static const TypeInfo iommu_info = {
|
||||
.name = "iommu",
|
||||
.parent = TYPE_SYS_BUS_DEVICE,
|
||||
.instance_size = sizeof(IOMMUState),
|
||||
.class_init = iommu_class_init,
|
||||
};
|
||||
|
||||
static void iommu_register_types(void)
|
||||
{
|
||||
type_register_static(&iommu_info);
|
||||
}
|
||||
|
||||
type_init(iommu_register_types)
|
Loading…
Add table
Add a link
Reference in a new issue