mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-12-11 16:00:50 -07:00
Revert "Get rid of _t suffix"
At the very least, a change like this requires discussion on the list.
The naming convention is goofy and it causes a massive merge problem. Something
like this _must_ be presented on the list first so people can provide input
and cope with it.
This reverts commit 99a0949b72.
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
This commit is contained in:
parent
99a0949b72
commit
c227f0995e
316 changed files with 3332 additions and 3325 deletions
218
exec.c
218
exec.c
|
|
@ -83,7 +83,7 @@ int code_gen_max_blocks;
|
|||
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
|
||||
static int nb_tbs;
|
||||
/* any access to the tbs or the page table must use this lock */
|
||||
a_spinlock tb_lock = SPIN_LOCK_UNLOCKED;
|
||||
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
|
||||
|
||||
#if defined(__arm__) || defined(__sparc_v9__)
|
||||
/* The prologue must be reachable with a direct jump. ARM and Sparc64
|
||||
|
|
@ -115,8 +115,8 @@ static int in_migration;
|
|||
|
||||
typedef struct RAMBlock {
|
||||
uint8_t *host;
|
||||
a_ram_addr offset;
|
||||
a_ram_addr length;
|
||||
ram_addr_t offset;
|
||||
ram_addr_t length;
|
||||
struct RAMBlock *next;
|
||||
} RAMBlock;
|
||||
|
||||
|
|
@ -124,7 +124,7 @@ static RAMBlock *ram_blocks;
|
|||
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
|
||||
then we can no longer assume contiguous ram offsets, and external uses
|
||||
of this variable will break. */
|
||||
a_ram_addr last_ram_offset;
|
||||
ram_addr_t last_ram_offset;
|
||||
#endif
|
||||
|
||||
CPUState *first_cpu;
|
||||
|
|
@ -153,8 +153,8 @@ typedef struct PageDesc {
|
|||
|
||||
typedef struct PhysPageDesc {
|
||||
/* offset in host memory of the page + io_index in the low bits */
|
||||
a_ram_addr phys_offset;
|
||||
a_ram_addr region_offset;
|
||||
ram_addr_t phys_offset;
|
||||
ram_addr_t region_offset;
|
||||
} PhysPageDesc;
|
||||
|
||||
#define L2_BITS 10
|
||||
|
|
@ -203,13 +203,13 @@ static int tb_flush_count;
|
|||
static int tb_phys_invalidate_count;
|
||||
|
||||
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
|
||||
typedef struct subpage {
|
||||
a_target_phys_addr base;
|
||||
typedef struct subpage_t {
|
||||
target_phys_addr_t base;
|
||||
CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
|
||||
CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
|
||||
void *opaque[TARGET_PAGE_SIZE][2][4];
|
||||
a_ram_addr region_offset[TARGET_PAGE_SIZE][2][4];
|
||||
} a_subpage;
|
||||
ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
|
||||
} subpage_t;
|
||||
|
||||
#ifdef _WIN32
|
||||
static void map_exec(void *addr, long size)
|
||||
|
|
@ -346,7 +346,7 @@ static inline PageDesc *page_find(target_ulong index)
|
|||
return p + (index & (L2_SIZE - 1));
|
||||
}
|
||||
|
||||
static PhysPageDesc *phys_page_find_alloc(a_target_phys_addr index, int alloc)
|
||||
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
|
||||
{
|
||||
void **lp, **p;
|
||||
PhysPageDesc *pd;
|
||||
|
|
@ -385,14 +385,14 @@ static PhysPageDesc *phys_page_find_alloc(a_target_phys_addr index, int alloc)
|
|||
return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
|
||||
}
|
||||
|
||||
static inline PhysPageDesc *phys_page_find(a_target_phys_addr index)
|
||||
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
|
||||
{
|
||||
return phys_page_find_alloc(index, 0);
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
static void tlb_protect_code(a_ram_addr ram_addr);
|
||||
static void tlb_unprotect_code_phys(CPUState *env, a_ram_addr ram_addr,
|
||||
static void tlb_protect_code(ram_addr_t ram_addr);
|
||||
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
|
||||
target_ulong vaddr);
|
||||
#define mmap_lock() do { } while(0)
|
||||
#define mmap_unlock() do { } while(0)
|
||||
|
|
@ -766,7 +766,7 @@ void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
|
|||
CPUState *env;
|
||||
PageDesc *p;
|
||||
unsigned int h, n1;
|
||||
a_target_phys_addr phys_pc;
|
||||
target_phys_addr_t phys_pc;
|
||||
TranslationBlock *tb1, *tb2;
|
||||
|
||||
/* remove the TB from the hash list */
|
||||
|
|
@ -914,7 +914,7 @@ TranslationBlock *tb_gen_code(CPUState *env,
|
|||
the same physical page. 'is_cpu_write_access' should be true if called
|
||||
from a real cpu write access: the virtual CPU will exit the current
|
||||
TB if code is modified inside this TB. */
|
||||
void tb_invalidate_phys_page_range(a_target_phys_addr start, a_target_phys_addr end,
|
||||
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
|
||||
int is_cpu_write_access)
|
||||
{
|
||||
TranslationBlock *tb, *tb_next, *saved_tb;
|
||||
|
|
@ -1021,7 +1021,7 @@ void tb_invalidate_phys_page_range(a_target_phys_addr start, a_target_phys_addr
|
|||
}
|
||||
|
||||
/* len must be <= 8 and start must be a multiple of len */
|
||||
static inline void tb_invalidate_phys_page_fast(a_target_phys_addr start, int len)
|
||||
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
|
||||
{
|
||||
PageDesc *p;
|
||||
int offset, b;
|
||||
|
|
@ -1048,7 +1048,7 @@ static inline void tb_invalidate_phys_page_fast(a_target_phys_addr start, int le
|
|||
}
|
||||
|
||||
#if !defined(CONFIG_SOFTMMU)
|
||||
static void tb_invalidate_phys_page(a_target_phys_addr addr,
|
||||
static void tb_invalidate_phys_page(target_phys_addr_t addr,
|
||||
unsigned long pc, void *puc)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
|
|
@ -1310,9 +1310,9 @@ static void tb_reset_jump_recursive(TranslationBlock *tb)
|
|||
#if defined(TARGET_HAS_ICE)
|
||||
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
|
||||
{
|
||||
a_target_phys_addr addr;
|
||||
target_phys_addr_t addr;
|
||||
target_ulong pd;
|
||||
a_ram_addr ram_addr;
|
||||
ram_addr_t ram_addr;
|
||||
PhysPageDesc *p;
|
||||
|
||||
addr = cpu_get_phys_page_debug(env, pc);
|
||||
|
|
@ -1533,7 +1533,7 @@ static void cpu_unlink_tb(CPUState *env)
|
|||
signals are used primarily to interrupt blocking syscalls. */
|
||||
#else
|
||||
TranslationBlock *tb;
|
||||
static a_spinlock interrupt_lock = SPIN_LOCK_UNLOCKED;
|
||||
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
|
||||
|
||||
tb = env->current_tb;
|
||||
/* if the cpu is currently executing code, we must unlink it and
|
||||
|
|
@ -1810,7 +1810,7 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
|
|||
|
||||
/* update the TLBs so that writes to code in the virtual page 'addr'
|
||||
can be detected */
|
||||
static void tlb_protect_code(a_ram_addr ram_addr)
|
||||
static void tlb_protect_code(ram_addr_t ram_addr)
|
||||
{
|
||||
cpu_physical_memory_reset_dirty(ram_addr,
|
||||
ram_addr + TARGET_PAGE_SIZE,
|
||||
|
|
@ -1819,7 +1819,7 @@ static void tlb_protect_code(a_ram_addr ram_addr)
|
|||
|
||||
/* update the TLB so that writes in physical page 'phys_addr' are no longer
|
||||
tested for self modifying code */
|
||||
static void tlb_unprotect_code_phys(CPUState *env, a_ram_addr ram_addr,
|
||||
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
|
||||
target_ulong vaddr)
|
||||
{
|
||||
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
|
||||
|
|
@ -1838,7 +1838,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
|
|||
}
|
||||
|
||||
/* Note: start and end must be within the same ram block. */
|
||||
void cpu_physical_memory_reset_dirty(a_ram_addr start, a_ram_addr end,
|
||||
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
|
||||
int dirty_flags)
|
||||
{
|
||||
CPUState *env;
|
||||
|
|
@ -1892,8 +1892,8 @@ int cpu_physical_memory_get_dirty_tracking(void)
|
|||
return in_migration;
|
||||
}
|
||||
|
||||
int cpu_physical_sync_dirty_bitmap(a_target_phys_addr start_addr,
|
||||
a_target_phys_addr end_addr)
|
||||
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
|
||||
target_phys_addr_t end_addr)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
|
|
@ -1904,7 +1904,7 @@ int cpu_physical_sync_dirty_bitmap(a_target_phys_addr start_addr,
|
|||
|
||||
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
|
||||
{
|
||||
a_ram_addr ram_addr;
|
||||
ram_addr_t ram_addr;
|
||||
void *p;
|
||||
|
||||
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
|
||||
|
|
@ -1952,7 +1952,7 @@ static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
|
|||
(can only happen in non SOFTMMU mode for I/O pages or pages
|
||||
conflicting with the host address space). */
|
||||
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
|
||||
a_target_phys_addr paddr, int prot,
|
||||
target_phys_addr_t paddr, int prot,
|
||||
int mmu_idx, int is_softmmu)
|
||||
{
|
||||
PhysPageDesc *p;
|
||||
|
|
@ -1960,11 +1960,11 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
|
|||
unsigned int index;
|
||||
target_ulong address;
|
||||
target_ulong code_address;
|
||||
a_target_phys_addr addend;
|
||||
target_phys_addr_t addend;
|
||||
int ret;
|
||||
CPUTLBEntry *te;
|
||||
CPUWatchpoint *wp;
|
||||
a_target_phys_addr iotlb;
|
||||
target_phys_addr_t iotlb;
|
||||
|
||||
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
|
||||
if (!p) {
|
||||
|
|
@ -2061,7 +2061,7 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
|
|||
}
|
||||
|
||||
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
|
||||
a_target_phys_addr paddr, int prot,
|
||||
target_phys_addr_t paddr, int prot,
|
||||
int mmu_idx, int is_softmmu)
|
||||
{
|
||||
return 0;
|
||||
|
|
@ -2267,10 +2267,10 @@ static inline void tlb_set_dirty(CPUState *env,
|
|||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
|
||||
static int subpage_register (a_subpage *mmio, uint32_t start, uint32_t end,
|
||||
a_ram_addr memory, a_ram_addr region_offset);
|
||||
static void *subpage_init (a_target_phys_addr base, a_ram_addr *phys,
|
||||
a_ram_addr orig_memory, a_ram_addr region_offset);
|
||||
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
|
||||
ram_addr_t memory, ram_addr_t region_offset);
|
||||
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
|
||||
ram_addr_t orig_memory, ram_addr_t region_offset);
|
||||
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
|
||||
need_subpage) \
|
||||
do { \
|
||||
|
|
@ -2298,15 +2298,15 @@ static void *subpage_init (a_target_phys_addr base, a_ram_addr *phys,
|
|||
start_addr and region_offset are rounded down to a page boundary
|
||||
before calculating this offset. This should not be a problem unless
|
||||
the low bits of start_addr and region_offset differ. */
|
||||
void cpu_register_physical_memory_offset(a_target_phys_addr start_addr,
|
||||
a_ram_addr size,
|
||||
a_ram_addr phys_offset,
|
||||
a_ram_addr region_offset)
|
||||
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
|
||||
ram_addr_t size,
|
||||
ram_addr_t phys_offset,
|
||||
ram_addr_t region_offset)
|
||||
{
|
||||
a_target_phys_addr addr, end_addr;
|
||||
target_phys_addr_t addr, end_addr;
|
||||
PhysPageDesc *p;
|
||||
CPUState *env;
|
||||
a_ram_addr orig_size = size;
|
||||
ram_addr_t orig_size = size;
|
||||
void *subpage;
|
||||
|
||||
if (kvm_enabled())
|
||||
|
|
@ -2317,12 +2317,12 @@ void cpu_register_physical_memory_offset(a_target_phys_addr start_addr,
|
|||
}
|
||||
region_offset &= TARGET_PAGE_MASK;
|
||||
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
|
||||
end_addr = start_addr + (a_target_phys_addr)size;
|
||||
end_addr = start_addr + (target_phys_addr_t)size;
|
||||
for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
|
||||
p = phys_page_find(addr >> TARGET_PAGE_BITS);
|
||||
if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
|
||||
a_ram_addr orig_memory = p->phys_offset;
|
||||
a_target_phys_addr start_addr2, end_addr2;
|
||||
ram_addr_t orig_memory = p->phys_offset;
|
||||
target_phys_addr_t start_addr2, end_addr2;
|
||||
int need_subpage = 0;
|
||||
|
||||
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
|
||||
|
|
@ -2353,7 +2353,7 @@ void cpu_register_physical_memory_offset(a_target_phys_addr start_addr,
|
|||
(phys_offset & IO_MEM_ROMD)) {
|
||||
phys_offset += TARGET_PAGE_SIZE;
|
||||
} else {
|
||||
a_target_phys_addr start_addr2, end_addr2;
|
||||
target_phys_addr_t start_addr2, end_addr2;
|
||||
int need_subpage = 0;
|
||||
|
||||
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
|
||||
|
|
@ -2381,7 +2381,7 @@ void cpu_register_physical_memory_offset(a_target_phys_addr start_addr,
|
|||
}
|
||||
|
||||
/* XXX: temporary until new memory mapping API */
|
||||
a_ram_addr cpu_get_physical_page_desc(a_target_phys_addr addr)
|
||||
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
|
||||
{
|
||||
PhysPageDesc *p;
|
||||
|
||||
|
|
@ -2391,19 +2391,19 @@ a_ram_addr cpu_get_physical_page_desc(a_target_phys_addr addr)
|
|||
return p->phys_offset;
|
||||
}
|
||||
|
||||
void qemu_register_coalesced_mmio(a_target_phys_addr addr, a_ram_addr size)
|
||||
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
|
||||
{
|
||||
if (kvm_enabled())
|
||||
kvm_coalesce_mmio_region(addr, size);
|
||||
}
|
||||
|
||||
void qemu_unregister_coalesced_mmio(a_target_phys_addr addr, a_ram_addr size)
|
||||
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
|
||||
{
|
||||
if (kvm_enabled())
|
||||
kvm_uncoalesce_mmio_region(addr, size);
|
||||
}
|
||||
|
||||
a_ram_addr qemu_ram_alloc(a_ram_addr size)
|
||||
ram_addr_t qemu_ram_alloc(ram_addr_t size)
|
||||
{
|
||||
RAMBlock *new_block;
|
||||
|
||||
|
|
@ -2430,7 +2430,7 @@ a_ram_addr qemu_ram_alloc(a_ram_addr size)
|
|||
return new_block->offset;
|
||||
}
|
||||
|
||||
void qemu_ram_free(a_ram_addr addr)
|
||||
void qemu_ram_free(ram_addr_t addr)
|
||||
{
|
||||
/* TODO: implement this. */
|
||||
}
|
||||
|
|
@ -2443,7 +2443,7 @@ void qemu_ram_free(a_ram_addr addr)
|
|||
It should not be used for general purpose DMA.
|
||||
Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
|
||||
*/
|
||||
void *qemu_get_ram_ptr(a_ram_addr addr)
|
||||
void *qemu_get_ram_ptr(ram_addr_t addr)
|
||||
{
|
||||
RAMBlock *prev;
|
||||
RAMBlock **prevp;
|
||||
|
|
@ -2474,7 +2474,7 @@ void *qemu_get_ram_ptr(a_ram_addr addr)
|
|||
|
||||
/* Some of the softmmu routines need to translate from a host pointer
|
||||
(typically a TLB entry) back to a ram offset. */
|
||||
a_ram_addr qemu_ram_addr_from_host(void *ptr)
|
||||
ram_addr_t qemu_ram_addr_from_host(void *ptr)
|
||||
{
|
||||
RAMBlock *prev;
|
||||
RAMBlock **prevp;
|
||||
|
|
@ -2498,7 +2498,7 @@ a_ram_addr qemu_ram_addr_from_host(void *ptr)
|
|||
return block->offset + (host - block->host);
|
||||
}
|
||||
|
||||
static uint32_t unassigned_mem_readb(void *opaque, a_target_phys_addr addr)
|
||||
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
|
||||
{
|
||||
#ifdef DEBUG_UNASSIGNED
|
||||
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
|
||||
|
|
@ -2509,7 +2509,7 @@ static uint32_t unassigned_mem_readb(void *opaque, a_target_phys_addr addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static uint32_t unassigned_mem_readw(void *opaque, a_target_phys_addr addr)
|
||||
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
|
||||
{
|
||||
#ifdef DEBUG_UNASSIGNED
|
||||
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
|
||||
|
|
@ -2520,7 +2520,7 @@ static uint32_t unassigned_mem_readw(void *opaque, a_target_phys_addr addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static uint32_t unassigned_mem_readl(void *opaque, a_target_phys_addr addr)
|
||||
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
|
||||
{
|
||||
#ifdef DEBUG_UNASSIGNED
|
||||
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
|
||||
|
|
@ -2531,7 +2531,7 @@ static uint32_t unassigned_mem_readl(void *opaque, a_target_phys_addr addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void unassigned_mem_writeb(void *opaque, a_target_phys_addr addr, uint32_t val)
|
||||
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
|
||||
{
|
||||
#ifdef DEBUG_UNASSIGNED
|
||||
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
|
||||
|
|
@ -2541,7 +2541,7 @@ static void unassigned_mem_writeb(void *opaque, a_target_phys_addr addr, uint32_
|
|||
#endif
|
||||
}
|
||||
|
||||
static void unassigned_mem_writew(void *opaque, a_target_phys_addr addr, uint32_t val)
|
||||
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
|
||||
{
|
||||
#ifdef DEBUG_UNASSIGNED
|
||||
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
|
||||
|
|
@ -2551,7 +2551,7 @@ static void unassigned_mem_writew(void *opaque, a_target_phys_addr addr, uint32_
|
|||
#endif
|
||||
}
|
||||
|
||||
static void unassigned_mem_writel(void *opaque, a_target_phys_addr addr, uint32_t val)
|
||||
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
|
||||
{
|
||||
#ifdef DEBUG_UNASSIGNED
|
||||
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
|
||||
|
|
@ -2573,7 +2573,7 @@ static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
|
|||
unassigned_mem_writel,
|
||||
};
|
||||
|
||||
static void notdirty_mem_writeb(void *opaque, a_target_phys_addr ram_addr,
|
||||
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
|
||||
uint32_t val)
|
||||
{
|
||||
int dirty_flags;
|
||||
|
|
@ -2593,7 +2593,7 @@ static void notdirty_mem_writeb(void *opaque, a_target_phys_addr ram_addr,
|
|||
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
|
||||
}
|
||||
|
||||
static void notdirty_mem_writew(void *opaque, a_target_phys_addr ram_addr,
|
||||
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
|
||||
uint32_t val)
|
||||
{
|
||||
int dirty_flags;
|
||||
|
|
@ -2613,7 +2613,7 @@ static void notdirty_mem_writew(void *opaque, a_target_phys_addr ram_addr,
|
|||
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
|
||||
}
|
||||
|
||||
static void notdirty_mem_writel(void *opaque, a_target_phys_addr ram_addr,
|
||||
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
|
||||
uint32_t val)
|
||||
{
|
||||
int dirty_flags;
|
||||
|
|
@ -2693,39 +2693,39 @@ static void check_watchpoint(int offset, int len_mask, int flags)
|
|||
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
|
||||
so these check for a hit then pass through to the normal out-of-line
|
||||
phys routines. */
|
||||
static uint32_t watch_mem_readb(void *opaque, a_target_phys_addr addr)
|
||||
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
|
||||
{
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
|
||||
return ldub_phys(addr);
|
||||
}
|
||||
|
||||
static uint32_t watch_mem_readw(void *opaque, a_target_phys_addr addr)
|
||||
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
|
||||
{
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
|
||||
return lduw_phys(addr);
|
||||
}
|
||||
|
||||
static uint32_t watch_mem_readl(void *opaque, a_target_phys_addr addr)
|
||||
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
|
||||
{
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
|
||||
return ldl_phys(addr);
|
||||
}
|
||||
|
||||
static void watch_mem_writeb(void *opaque, a_target_phys_addr addr,
|
||||
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
|
||||
uint32_t val)
|
||||
{
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
|
||||
stb_phys(addr, val);
|
||||
}
|
||||
|
||||
static void watch_mem_writew(void *opaque, a_target_phys_addr addr,
|
||||
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
|
||||
uint32_t val)
|
||||
{
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
|
||||
stw_phys(addr, val);
|
||||
}
|
||||
|
||||
static void watch_mem_writel(void *opaque, a_target_phys_addr addr,
|
||||
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
|
||||
uint32_t val)
|
||||
{
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
|
||||
|
|
@ -2744,7 +2744,7 @@ static CPUWriteMemoryFunc * const watch_mem_write[3] = {
|
|||
watch_mem_writel,
|
||||
};
|
||||
|
||||
static inline uint32_t subpage_readlen (a_subpage *mmio, a_target_phys_addr addr,
|
||||
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
|
||||
unsigned int len)
|
||||
{
|
||||
uint32_t ret;
|
||||
|
|
@ -2761,7 +2761,7 @@ static inline uint32_t subpage_readlen (a_subpage *mmio, a_target_phys_addr addr
|
|||
return ret;
|
||||
}
|
||||
|
||||
static inline void subpage_writelen (a_subpage *mmio, a_target_phys_addr addr,
|
||||
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
|
||||
uint32_t value, unsigned int len)
|
||||
{
|
||||
unsigned int idx;
|
||||
|
|
@ -2776,7 +2776,7 @@ static inline void subpage_writelen (a_subpage *mmio, a_target_phys_addr addr,
|
|||
value);
|
||||
}
|
||||
|
||||
static uint32_t subpage_readb (void *opaque, a_target_phys_addr addr)
|
||||
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
|
||||
{
|
||||
#if defined(DEBUG_SUBPAGE)
|
||||
printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
|
||||
|
|
@ -2785,7 +2785,7 @@ static uint32_t subpage_readb (void *opaque, a_target_phys_addr addr)
|
|||
return subpage_readlen(opaque, addr, 0);
|
||||
}
|
||||
|
||||
static void subpage_writeb (void *opaque, a_target_phys_addr addr,
|
||||
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
|
||||
uint32_t value)
|
||||
{
|
||||
#if defined(DEBUG_SUBPAGE)
|
||||
|
|
@ -2794,7 +2794,7 @@ static void subpage_writeb (void *opaque, a_target_phys_addr addr,
|
|||
subpage_writelen(opaque, addr, value, 0);
|
||||
}
|
||||
|
||||
static uint32_t subpage_readw (void *opaque, a_target_phys_addr addr)
|
||||
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
|
||||
{
|
||||
#if defined(DEBUG_SUBPAGE)
|
||||
printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
|
||||
|
|
@ -2803,7 +2803,7 @@ static uint32_t subpage_readw (void *opaque, a_target_phys_addr addr)
|
|||
return subpage_readlen(opaque, addr, 1);
|
||||
}
|
||||
|
||||
static void subpage_writew (void *opaque, a_target_phys_addr addr,
|
||||
static void subpage_writew (void *opaque, target_phys_addr_t addr,
|
||||
uint32_t value)
|
||||
{
|
||||
#if defined(DEBUG_SUBPAGE)
|
||||
|
|
@ -2812,7 +2812,7 @@ static void subpage_writew (void *opaque, a_target_phys_addr addr,
|
|||
subpage_writelen(opaque, addr, value, 1);
|
||||
}
|
||||
|
||||
static uint32_t subpage_readl (void *opaque, a_target_phys_addr addr)
|
||||
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
|
||||
{
|
||||
#if defined(DEBUG_SUBPAGE)
|
||||
printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
|
||||
|
|
@ -2822,7 +2822,7 @@ static uint32_t subpage_readl (void *opaque, a_target_phys_addr addr)
|
|||
}
|
||||
|
||||
static void subpage_writel (void *opaque,
|
||||
a_target_phys_addr addr, uint32_t value)
|
||||
target_phys_addr_t addr, uint32_t value)
|
||||
{
|
||||
#if defined(DEBUG_SUBPAGE)
|
||||
printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
|
||||
|
|
@ -2842,8 +2842,8 @@ static CPUWriteMemoryFunc * const subpage_write[] = {
|
|||
&subpage_writel,
|
||||
};
|
||||
|
||||
static int subpage_register (a_subpage *mmio, uint32_t start, uint32_t end,
|
||||
a_ram_addr memory, a_ram_addr region_offset)
|
||||
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
|
||||
ram_addr_t memory, ram_addr_t region_offset)
|
||||
{
|
||||
int idx, eidx;
|
||||
unsigned int i;
|
||||
|
|
@ -2875,13 +2875,13 @@ static int subpage_register (a_subpage *mmio, uint32_t start, uint32_t end,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void *subpage_init (a_target_phys_addr base, a_ram_addr *phys,
|
||||
a_ram_addr orig_memory, a_ram_addr region_offset)
|
||||
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
|
||||
ram_addr_t orig_memory, ram_addr_t region_offset)
|
||||
{
|
||||
a_subpage *mmio;
|
||||
subpage_t *mmio;
|
||||
int subpage_memory;
|
||||
|
||||
mmio = qemu_mallocz(sizeof(a_subpage));
|
||||
mmio = qemu_mallocz(sizeof(subpage_t));
|
||||
|
||||
mmio->base = base;
|
||||
subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
|
||||
|
|
@ -2981,7 +2981,7 @@ static void io_mem_init(void)
|
|||
|
||||
/* physical memory access (slow version, mainly for debug) */
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
|
||||
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
|
||||
int len, int is_write)
|
||||
{
|
||||
int l, flags;
|
||||
|
|
@ -3022,13 +3022,13 @@ void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
|
|||
}
|
||||
|
||||
#else
|
||||
void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
|
||||
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
|
||||
int len, int is_write)
|
||||
{
|
||||
int l, io_index;
|
||||
uint8_t *ptr;
|
||||
uint32_t val;
|
||||
a_target_phys_addr page;
|
||||
target_phys_addr_t page;
|
||||
unsigned long pd;
|
||||
PhysPageDesc *p;
|
||||
|
||||
|
|
@ -3046,7 +3046,7 @@ void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
|
|||
|
||||
if (is_write) {
|
||||
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
|
||||
a_target_phys_addr addr1 = addr;
|
||||
target_phys_addr_t addr1 = addr;
|
||||
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
|
||||
if (p)
|
||||
addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
|
||||
|
|
@ -3085,7 +3085,7 @@ void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
|
|||
} else {
|
||||
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
|
||||
!(pd & IO_MEM_ROMD)) {
|
||||
a_target_phys_addr addr1 = addr;
|
||||
target_phys_addr_t addr1 = addr;
|
||||
/* I/O case */
|
||||
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
|
||||
if (p)
|
||||
|
|
@ -3120,12 +3120,12 @@ void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
|
|||
}
|
||||
|
||||
/* used for ROM loading : can write in RAM and ROM */
|
||||
void cpu_physical_memory_write_rom(a_target_phys_addr addr,
|
||||
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
|
||||
const uint8_t *buf, int len)
|
||||
{
|
||||
int l;
|
||||
uint8_t *ptr;
|
||||
a_target_phys_addr page;
|
||||
target_phys_addr_t page;
|
||||
unsigned long pd;
|
||||
PhysPageDesc *p;
|
||||
|
||||
|
|
@ -3160,8 +3160,8 @@ void cpu_physical_memory_write_rom(a_target_phys_addr addr,
|
|||
|
||||
typedef struct {
|
||||
void *buffer;
|
||||
a_target_phys_addr addr;
|
||||
a_target_phys_addr len;
|
||||
target_phys_addr_t addr;
|
||||
target_phys_addr_t len;
|
||||
} BounceBuffer;
|
||||
|
||||
static BounceBuffer bounce;
|
||||
|
|
@ -3211,16 +3211,16 @@ static void cpu_notify_map_clients(void)
|
|||
* Use cpu_register_map_client() to know when retrying the map operation is
|
||||
* likely to succeed.
|
||||
*/
|
||||
void *cpu_physical_memory_map(a_target_phys_addr addr,
|
||||
a_target_phys_addr *plen,
|
||||
void *cpu_physical_memory_map(target_phys_addr_t addr,
|
||||
target_phys_addr_t *plen,
|
||||
int is_write)
|
||||
{
|
||||
a_target_phys_addr len = *plen;
|
||||
a_target_phys_addr done = 0;
|
||||
target_phys_addr_t len = *plen;
|
||||
target_phys_addr_t done = 0;
|
||||
int l;
|
||||
uint8_t *ret = NULL;
|
||||
uint8_t *ptr;
|
||||
a_target_phys_addr page;
|
||||
target_phys_addr_t page;
|
||||
unsigned long pd;
|
||||
PhysPageDesc *p;
|
||||
unsigned long addr1;
|
||||
|
|
@ -3270,12 +3270,12 @@ void *cpu_physical_memory_map(a_target_phys_addr addr,
|
|||
* Will also mark the memory as dirty if is_write == 1. access_len gives
|
||||
* the amount of memory that was actually read or written by the caller.
|
||||
*/
|
||||
void cpu_physical_memory_unmap(void *buffer, a_target_phys_addr len,
|
||||
int is_write, a_target_phys_addr access_len)
|
||||
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
|
||||
int is_write, target_phys_addr_t access_len)
|
||||
{
|
||||
if (buffer != bounce.buffer) {
|
||||
if (is_write) {
|
||||
a_ram_addr addr1 = qemu_ram_addr_from_host(buffer);
|
||||
ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
|
||||
while (access_len) {
|
||||
unsigned l;
|
||||
l = TARGET_PAGE_SIZE;
|
||||
|
|
@ -3303,7 +3303,7 @@ void cpu_physical_memory_unmap(void *buffer, a_target_phys_addr len,
|
|||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
uint32_t ldl_phys(a_target_phys_addr addr)
|
||||
uint32_t ldl_phys(target_phys_addr_t addr)
|
||||
{
|
||||
int io_index;
|
||||
uint8_t *ptr;
|
||||
|
|
@ -3335,7 +3335,7 @@ uint32_t ldl_phys(a_target_phys_addr addr)
|
|||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
uint64_t ldq_phys(a_target_phys_addr addr)
|
||||
uint64_t ldq_phys(target_phys_addr_t addr)
|
||||
{
|
||||
int io_index;
|
||||
uint8_t *ptr;
|
||||
|
|
@ -3373,7 +3373,7 @@ uint64_t ldq_phys(a_target_phys_addr addr)
|
|||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
uint32_t ldub_phys(a_target_phys_addr addr)
|
||||
uint32_t ldub_phys(target_phys_addr_t addr)
|
||||
{
|
||||
uint8_t val;
|
||||
cpu_physical_memory_read(addr, &val, 1);
|
||||
|
|
@ -3381,7 +3381,7 @@ uint32_t ldub_phys(a_target_phys_addr addr)
|
|||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
uint32_t lduw_phys(a_target_phys_addr addr)
|
||||
uint32_t lduw_phys(target_phys_addr_t addr)
|
||||
{
|
||||
uint16_t val;
|
||||
cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
|
||||
|
|
@ -3391,7 +3391,7 @@ uint32_t lduw_phys(a_target_phys_addr addr)
|
|||
/* warning: addr must be aligned. The ram page is not masked as dirty
|
||||
and the code inside is not invalidated. It is useful if the dirty
|
||||
bits are used to track modified PTEs */
|
||||
void stl_phys_notdirty(a_target_phys_addr addr, uint32_t val)
|
||||
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
|
||||
{
|
||||
int io_index;
|
||||
uint8_t *ptr;
|
||||
|
|
@ -3427,7 +3427,7 @@ void stl_phys_notdirty(a_target_phys_addr addr, uint32_t val)
|
|||
}
|
||||
}
|
||||
|
||||
void stq_phys_notdirty(a_target_phys_addr addr, uint64_t val)
|
||||
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
|
||||
{
|
||||
int io_index;
|
||||
uint8_t *ptr;
|
||||
|
|
@ -3460,7 +3460,7 @@ void stq_phys_notdirty(a_target_phys_addr addr, uint64_t val)
|
|||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
void stl_phys(a_target_phys_addr addr, uint32_t val)
|
||||
void stl_phys(target_phys_addr_t addr, uint32_t val)
|
||||
{
|
||||
int io_index;
|
||||
uint8_t *ptr;
|
||||
|
|
@ -3496,21 +3496,21 @@ void stl_phys(a_target_phys_addr addr, uint32_t val)
|
|||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
void stb_phys(a_target_phys_addr addr, uint32_t val)
|
||||
void stb_phys(target_phys_addr_t addr, uint32_t val)
|
||||
{
|
||||
uint8_t v = val;
|
||||
cpu_physical_memory_write(addr, &v, 1);
|
||||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
void stw_phys(a_target_phys_addr addr, uint32_t val)
|
||||
void stw_phys(target_phys_addr_t addr, uint32_t val)
|
||||
{
|
||||
uint16_t v = tswap16(val);
|
||||
cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
|
||||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
void stq_phys(a_target_phys_addr addr, uint64_t val)
|
||||
void stq_phys(target_phys_addr_t addr, uint64_t val)
|
||||
{
|
||||
val = tswap64(val);
|
||||
cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
|
||||
|
|
@ -3523,7 +3523,7 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
|
|||
uint8_t *buf, int len, int is_write)
|
||||
{
|
||||
int l;
|
||||
a_target_phys_addr phys_addr;
|
||||
target_phys_addr_t phys_addr;
|
||||
target_ulong page;
|
||||
|
||||
while (len > 0) {
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue