Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190925' into staging

Fixes for TLB_BSWAP
Conversion of NOTDIRTY and ROM handling to cputlb
Followup cleanups to cputlb

# gpg: Signature made Wed 25 Sep 2019 19:41:17 BST
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190925:
  cputlb: Pass retaddr to tb_check_watchpoint
  cputlb: Pass retaddr to tb_invalidate_phys_page_fast
  cputlb: Remove tb_invalidate_phys_page_range is_cpu_write_access
  cputlb: Remove cpu->mem_io_vaddr
  cputlb: Handle TLB_NOTDIRTY in probe_access
  cputlb: Merge and move memory_notdirty_write_{prepare,complete}
  cputlb: Partially inline memory_region_section_get_iotlb
  cputlb: Move NOTDIRTY handling from I/O path to TLB path
  cputlb: Move ROM handling from I/O path to TLB path
  exec: Adjust notdirty tracing
  cputlb: Introduce TLB_BSWAP
  cputlb: Split out load/store_memop
  cputlb: Use qemu_build_not_reached in load/store_helpers
  qemu/compiler.h: Add qemu_build_not_reached
  cputlb: Disable __always_inline__ without optimization
  exec: Use TARGET_PAGE_BITS_MIN for TLB flags

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

Commit deee6ff7b7: 13 changed files with 288 additions and 427 deletions.
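The common thread of this series is that slow-path conditions (MMIO, byte swap, discarded writes, clean pages) are encoded as flag bits kept in the low bits of a TLB entry's address word, so the fast path stays a single compare. A minimal sketch of how the new TLB_BSWAP flag is consumed (the function name here is hypothetical; the real handling is in load_helper/store_helper in the diff below):

/* Illustrative sketch only, not part of the patch: a page mapped with
 * MemTxAttrs.byte_swap gets TLB_BSWAP set in its TLB entry, and the access
 * helpers simply flip the endianness of the MemOp before doing the access. */
static inline MemOp tlb_adjust_memop(target_ulong tlb_addr, MemOp op)
{
    return (tlb_addr & TLB_BSWAP) ? (op ^ MO_BSWAP) : op;
}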
accel/tcg/cputlb.c

@@ -33,6 +33,7 @@
 #include "exec/helper-proto.h"
 #include "qemu/atomic.h"
 #include "qemu/atomic128.h"
+#include "translate-all.h"
 
 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -577,7 +578,8 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
 {
     uintptr_t addr = tlb_entry->addr_write;
 
-    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
+    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
+                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
         addr &= TARGET_PAGE_MASK;
         addr += tlb_entry->addend;
         if ((addr - start) < length) {
@@ -704,13 +706,14 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     MemoryRegionSection *section;
     unsigned int index;
     target_ulong address;
-    target_ulong code_address;
+    target_ulong write_address;
     uintptr_t addend;
     CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     int wp_flags;
+    bool is_ram, is_romd;
 
     assert_cpu_is_self(cpu);
 
@@ -737,22 +740,48 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         address |= TLB_INVALID_MASK;
     }
     if (attrs.byte_swap) {
-        /* Force the access through the I/O slow path. */
-        address |= TLB_MMIO;
+        address |= TLB_BSWAP;
     }
 
-    if (!memory_region_is_ram(section->mr) &&
-        !memory_region_is_romd(section->mr)) {
-        /* IO memory case */
-        address |= TLB_MMIO;
-        addend = 0;
-    } else {
-        /* TLB_MMIO for rom/romd handled below */
+    is_ram = memory_region_is_ram(section->mr);
+    is_romd = memory_region_is_romd(section->mr);
+
+    if (is_ram || is_romd) {
+        /* RAM and ROMD both have associated host memory. */
         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
+    } else {
+        /* I/O does not; force the host address to NULL. */
+        addend = 0;
     }
 
-    code_address = address;
-    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
-                                            paddr_page, xlat, prot, &address);
+    write_address = address;
+    if (is_ram) {
+        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
+        /*
+         * Computing is_clean is expensive; avoid all that unless
+         * the page is actually writable.
+         */
+        if (prot & PAGE_WRITE) {
+            if (section->readonly) {
+                write_address |= TLB_DISCARD_WRITE;
+            } else if (cpu_physical_memory_is_clean(iotlb)) {
+                write_address |= TLB_NOTDIRTY;
+            }
+        }
+    } else {
+        /* I/O or ROMD */
+        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
+        /*
+         * Writes to romd devices must go through MMIO to enable write.
+         * Reads to romd devices go through the ram_ptr found above,
+         * but of course reads to I/O must go through MMIO.
+         */
+        write_address |= TLB_MMIO;
+        if (!is_romd) {
+            address = write_address;
+        }
+    }
 
     wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                               TARGET_PAGE_SIZE);
@@ -792,8 +821,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     /*
      * At this point iotlb contains a physical section number in the lower
      * TARGET_PAGE_BITS, and either
-     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
-     *  + the offset within section->mr of the page base (otherwise)
+     *  + the ram_addr_t of the page base of the target RAM (RAM)
+     *  + the offset within section->mr of the page base (I/O, ROMD)
      * We subtract the vaddr_page (which is page aligned and thus won't
      * disturb the low bits) to give an offset which can be added to the
      * (non-page-aligned) vaddr of the eventual memory access to get
@@ -816,24 +845,14 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     }
 
     if (prot & PAGE_EXEC) {
-        tn.addr_code = code_address;
+        tn.addr_code = address;
     } else {
         tn.addr_code = -1;
     }
 
     tn.addr_write = -1;
     if (prot & PAGE_WRITE) {
-        if ((memory_region_is_ram(section->mr) && section->readonly)
-            || memory_region_is_romd(section->mr)) {
-            /* Write access calls the I/O callback. */
-            tn.addr_write = address | TLB_MMIO;
-        } else if (memory_region_is_ram(section->mr)
-                   && cpu_physical_memory_is_clean(
-                       memory_region_get_ram_addr(section->mr) + xlat)) {
-            tn.addr_write = address | TLB_NOTDIRTY;
-        } else {
-            tn.addr_write = address;
-        }
+        tn.addr_write = write_address;
         if (prot & PAGE_WRITE_INV) {
             tn.addr_write |= TLB_INVALID_MASK;
         }
@@ -901,19 +920,14 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    if (iotlbentry->attrs.byte_swap) {
-        op ^= MO_BSWAP;
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+    if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
 
-    cpu->mem_io_vaddr = addr;
     cpu->mem_io_access_type = access_type;
 
     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
@@ -947,17 +961,12 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    if (iotlbentry->attrs.byte_swap) {
-        op ^= MO_BSWAP;
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+    if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
-    cpu->mem_io_vaddr = addr;
     cpu->mem_io_pc = retaddr;
 
     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
@@ -1075,6 +1084,33 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
     return qemu_ram_addr_from_host_nofail(p);
 }
 
+static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
+                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
+{
+    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
+
+    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
+
+    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
+        struct page_collection *pages
+            = page_collection_lock(ram_addr, ram_addr + size);
+        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
+        page_collection_unlock(pages);
+    }
+
+    /*
+     * Set both VGA and migration bits for simplicity and to remove
+     * the notdirty callback faster.
+     */
+    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
+
+    /* We remove the notdirty callback only if the code has been flushed. */
+    if (!cpu_physical_memory_is_clean(ram_addr)) {
+        trace_memory_notdirty_set_dirty(mem_vaddr);
+        tlb_set_dirty(cpu, mem_vaddr);
+    }
+}
+
 /*
  * Probe for whether the specified guest access is permitted. If it is not
  * permitted then an exception will be taken in the same way as if this
@@ -1126,16 +1162,24 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
         return NULL;
     }
 
-    /* Handle watchpoints. */
-    if (tlb_addr & TLB_WATCHPOINT) {
-        cpu_check_watchpoint(env_cpu(env), addr, size,
-                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
-                             wp_access, retaddr);
-    }
-
-    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) {
-        /* I/O access */
-        return NULL;
+    if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
+        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+
+        /* Reject I/O access, or other required slow-path. */
+        if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
+            return NULL;
+        }
+
+        /* Handle watchpoints. */
+        if (tlb_addr & TLB_WATCHPOINT) {
+            cpu_check_watchpoint(env_cpu(env), addr, size,
+                                 iotlbentry->attrs, wp_access, retaddr);
+        }
+
+        /* Handle clean RAM pages. */
+        if (tlb_addr & TLB_NOTDIRTY) {
+            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+        }
     }
 
     return (void *)((uintptr_t)addr + entry->addend);
@@ -1194,8 +1238,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
 /* Probe for a read-modify-write atomic operation. Do not allow unaligned
  * operations, or io operations to proceed. Return the host address. */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, uintptr_t retaddr,
-                               NotDirtyInfo *ndi)
+                               TCGMemOpIdx oi, uintptr_t retaddr)
 {
     size_t mmu_idx = get_mmuidx(oi);
     uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1255,12 +1298,9 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
 
-    ndi->active = false;
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
-        ndi->active = true;
-        memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
-                                      qemu_ram_addr_from_host_nofail(hostaddr),
-                                      1 << s_bits);
+        notdirty_write(env_cpu(env), addr, 1 << s_bits,
+                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
     }
 
     return hostaddr;
@@ -1281,7 +1321,30 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr);
 
-static inline uint64_t __attribute__((always_inline))
+static inline uint64_t QEMU_ALWAYS_INLINE
+load_memop(const void *haddr, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        return ldub_p(haddr);
+    case MO_BEUW:
+        return lduw_be_p(haddr);
+    case MO_LEUW:
+        return lduw_le_p(haddr);
+    case MO_BEUL:
+        return (uint32_t)ldl_be_p(haddr);
+    case MO_LEUL:
+        return (uint32_t)ldl_le_p(haddr);
+    case MO_BEQ:
+        return ldq_be_p(haddr);
+    case MO_LEQ:
+        return ldq_le_p(haddr);
+    default:
+        qemu_build_not_reached();
+    }
+}
+
+static inline uint64_t QEMU_ALWAYS_INLINE
 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
             FullLoadHelper *full_load)
@@ -1321,6 +1384,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     /* Handle anything that isn't just a straight memory access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
+        bool need_swap;
 
         /* For anything that is unaligned, recurse through full_load. */
         if ((addr & (size - 1)) != 0) {
@@ -1334,17 +1398,27 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             /* On watchpoint hit, this will longjmp out. */
             cpu_check_watchpoint(env_cpu(env), addr, size,
                                  iotlbentry->attrs, BP_MEM_READ, retaddr);
-
-            /* The backing page may or may not require I/O. */
-            tlb_addr &= ~TLB_WATCHPOINT;
-            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
-                goto do_aligned_access;
-            }
         }
 
+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+
         /* Handle I/O access. */
-        return io_readx(env, iotlbentry, mmu_idx, addr,
-                        retaddr, access_type, op);
+        if (likely(tlb_addr & TLB_MMIO)) {
+            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+                            access_type, op ^ (need_swap * MO_BSWAP));
+        }
+
+        haddr = (void *)((uintptr_t)addr + entry->addend);
+
+        /*
+         * Keep these two load_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this. ;-)
+         */
+        if (unlikely(need_swap)) {
+            return load_memop(haddr, op ^ MO_BSWAP);
+        }
+        return load_memop(haddr, op);
     }
 
     /* Handle slow unaligned access (it spans two pages or IO). */
@@ -1371,35 +1445,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         return res & MAKE_64BIT_MASK(0, size * 8);
     }
 
- do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        res = ldub_p(haddr);
-        break;
-    case MO_BEUW:
-        res = lduw_be_p(haddr);
-        break;
-    case MO_LEUW:
-        res = lduw_le_p(haddr);
-        break;
-    case MO_BEUL:
-        res = (uint32_t)ldl_be_p(haddr);
-        break;
-    case MO_LEUL:
-        res = (uint32_t)ldl_le_p(haddr);
-        break;
-    case MO_BEQ:
-        res = ldq_be_p(haddr);
-        break;
-    case MO_LEQ:
-        res = ldq_le_p(haddr);
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    return res;
+    return load_memop(haddr, op);
 }
 
 /*
@@ -1530,7 +1577,37 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
 * Store Helpers
 */
 
-static inline void __attribute__((always_inline))
+static inline void QEMU_ALWAYS_INLINE
+store_memop(void *haddr, uint64_t val, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        stb_p(haddr, val);
+        break;
+    case MO_BEUW:
+        stw_be_p(haddr, val);
+        break;
+    case MO_LEUW:
+        stw_le_p(haddr, val);
+        break;
+    case MO_BEUL:
+        stl_be_p(haddr, val);
+        break;
+    case MO_LEUL:
+        stl_le_p(haddr, val);
+        break;
+    case MO_BEQ:
+        stq_be_p(haddr, val);
+        break;
+    case MO_LEQ:
+        stq_le_p(haddr, val);
+        break;
+    default:
+        qemu_build_not_reached();
+    }
+}
+
+static inline void QEMU_ALWAYS_INLINE
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
 {
@@ -1564,6 +1641,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     /* Handle anything that isn't just a straight memory access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
+        bool need_swap;
 
         /* For anything that is unaligned, recurse through byte stores. */
         if ((addr & (size - 1)) != 0) {
@@ -1577,16 +1655,39 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             /* On watchpoint hit, this will longjmp out. */
             cpu_check_watchpoint(env_cpu(env), addr, size,
                                  iotlbentry->attrs, BP_MEM_WRITE, retaddr);
-
-            /* The backing page may or may not require I/O. */
-            tlb_addr &= ~TLB_WATCHPOINT;
-            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
-                goto do_aligned_access;
-            }
         }
 
+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+
         /* Handle I/O access. */
-        io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
+        if (tlb_addr & TLB_MMIO) {
+            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+                      op ^ (need_swap * MO_BSWAP));
+            return;
+        }
+
+        /* Ignore writes to ROM. */
+        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
+            return;
+        }
+
+        /* Handle clean RAM pages. */
+        if (tlb_addr & TLB_NOTDIRTY) {
+            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+        }
+
+        haddr = (void *)((uintptr_t)addr + entry->addend);
+
+        /*
+         * Keep these two store_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this. ;-)
+         */
+        if (unlikely(need_swap)) {
+            store_memop(haddr, val, op ^ MO_BSWAP);
+        } else {
+            store_memop(haddr, val, op);
+        }
         return;
     }
 
@@ -1655,34 +1756,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         return;
     }
 
- do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        stb_p(haddr, val);
-        break;
-    case MO_BEUW:
-        stw_be_p(haddr, val);
-        break;
-    case MO_LEUW:
-        stw_le_p(haddr, val);
-        break;
-    case MO_BEUL:
-        stl_be_p(haddr, val);
-        break;
-    case MO_LEUL:
-        stl_le_p(haddr, val);
-        break;
-    case MO_BEQ:
-        stq_be_p(haddr, val);
-        break;
-    case MO_LEQ:
-        stq_le_p(haddr, val);
-        break;
-    default:
-        g_assert_not_reached();
-        break;
-    }
+    store_memop(haddr, val, op);
 }
 
 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
@@ -1733,14 +1808,9 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
 #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
 #define ATOMIC_NAME(X) \
     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
-#define ATOMIC_MMU_CLEANUP                              \
-    do {                                                \
-        if (unlikely(ndi.active)) {                     \
-            memory_notdirty_write_complete(&ndi);       \
-        }                                               \
-    } while (0)
+#define ATOMIC_MMU_DECLS
+#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
+#define ATOMIC_MMU_CLEANUP
 
 #define DATA_SIZE 1
 #include "atomic_template.h"
@@ -1768,7 +1838,7 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
 #undef ATOMIC_MMU_LOOKUP
 #define EXTRA_ARGS , TCGMemOpIdx oi
 #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
+#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
 
 #define DATA_SIZE 1
 #include "atomic_template.h"
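One caller-visible effect of the cputlb.c changes above: probe_access() now performs the NOTDIRTY bookkeeping itself and returns NULL whenever the access still needs the slow path. A hedged usage sketch follows; helper_zero_range is hypothetical, probe_access and cpu_stb_data_ra are the real APIs at this point, and the range is assumed not to cross a guest page boundary.

/* Hypothetical target helper, for illustration only (assumes the usual
 * "qemu/osdep.h", "cpu.h", "exec/exec-all.h" and "exec/cpu_ldst.h" includes,
 * and that [addr, addr + len) stays within one guest page). */
static void helper_zero_range(CPUArchState *env, target_ulong addr,
                              uint32_t len, int mmu_idx, uintptr_t ra)
{
    void *host = probe_access(env, addr, len, MMU_DATA_STORE, mmu_idx, ra);

    if (host) {
        /* Plain RAM: watchpoints and dirty tracking already handled. */
        memset(host, 0, len);
    } else {
        /* MMIO, byte-swapped, or discarded-write page: use the slow path. */
        for (uint32_t i = 0; i < len; i++) {
            cpu_stb_data_ra(env, addr + i, 0, ra);
        }
    }
}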
accel/tcg/translate-all.c

@@ -1889,7 +1889,7 @@ static void
 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                       PageDesc *p, tb_page_addr_t start,
                                       tb_page_addr_t end,
-                                      int is_cpu_write_access)
+                                      uintptr_t retaddr)
 {
     TranslationBlock *tb;
     tb_page_addr_t tb_start, tb_end;
@@ -1897,9 +1897,9 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 #ifdef TARGET_HAS_PRECISE_SMC
     CPUState *cpu = current_cpu;
     CPUArchState *env = NULL;
-    int current_tb_not_found = is_cpu_write_access;
+    bool current_tb_not_found = retaddr != 0;
+    bool current_tb_modified = false;
     TranslationBlock *current_tb = NULL;
-    int current_tb_modified = 0;
     target_ulong current_pc = 0;
     target_ulong current_cs_base = 0;
     uint32_t current_flags = 0;
@@ -1931,24 +1931,21 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
         if (!(tb_end <= start || tb_start >= end)) {
 #ifdef TARGET_HAS_PRECISE_SMC
             if (current_tb_not_found) {
-                current_tb_not_found = 0;
-                current_tb = NULL;
-                if (cpu->mem_io_pc) {
-                    /* now we have a real cpu fault */
-                    current_tb = tcg_tb_lookup(cpu->mem_io_pc);
-                }
+                current_tb_not_found = false;
+                /* now we have a real cpu fault */
+                current_tb = tcg_tb_lookup(retaddr);
             }
             if (current_tb == tb &&
                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
-                /* If we are modifying the current TB, we must stop
-                its execution. We could be more precise by checking
-                that the modification is after the current PC, but it
-                would require a specialized function to partially
-                restore the CPU state */
-
-                current_tb_modified = 1;
-                cpu_restore_state_from_tb(cpu, current_tb,
-                                          cpu->mem_io_pc, true);
+                /*
+                 * If we are modifying the current TB, we must stop
+                 * its execution. We could be more precise by checking
+                 * that the modification is after the current PC, but it
+                 * would require a specialized function to partially
+                 * restore the CPU state.
+                 */
+                current_tb_modified = true;
+                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                      &current_flags);
             }
@@ -1983,8 +1980,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 *
 * Called with mmap_lock held for user-mode emulation
 */
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
-                                   int is_cpu_write_access)
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
 {
     struct page_collection *pages;
     PageDesc *p;
@@ -1996,8 +1992,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
         return;
     }
     pages = page_collection_lock(start, end);
-    tb_invalidate_phys_page_range__locked(pages, p, start, end,
-                                          is_cpu_write_access);
+    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
     page_collection_unlock(pages);
 }
 
@@ -2044,7 +2039,8 @@ void tb_invalidate_phys_range(target_ulong start, target_ulong end)
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len)
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr)
 {
     PageDesc *p;
 
@@ -2071,7 +2067,8 @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
         }
     } else {
     do_invalidate:
-        tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
+        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
+                                              retaddr);
     }
 }
 #else
@@ -2145,16 +2142,16 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
 #endif
 
 /* user-mode: call with mmap_lock held */
-void tb_check_watchpoint(CPUState *cpu)
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
 {
     TranslationBlock *tb;
 
     assert_memory_lock();
 
-    tb = tcg_tb_lookup(cpu->mem_io_pc);
+    tb = tcg_tb_lookup(retaddr);
     if (tb) {
         /* We can use retranslation to find the PC. */
-        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
+        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
         tb_phys_invalidate(tb, -1);
     } else {
         /* The exception probably happened in a helper. The CPU state should
accel/tcg/translate-all.h

@@ -27,10 +27,10 @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
                                              tb_page_addr_t end);
 void page_collection_unlock(struct page_collection *set);
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len);
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
-                                   int is_cpu_write_access);
-void tb_check_watchpoint(CPUState *cpu);
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr);
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
 
 #ifdef CONFIG_USER_ONLY
 int page_unprotect(target_ulong address, uintptr_t pc);
exec.c (158 changed lines)

@@ -88,7 +88,6 @@ static MemoryRegion *system_io;
 AddressSpace address_space_io;
 AddressSpace address_space_memory;
 
-MemoryRegion io_mem_rom, io_mem_notdirty;
 static MemoryRegion io_mem_unassigned;
 #endif
 
@@ -191,8 +190,6 @@ typedef struct subpage_t {
 } subpage_t;
 
 #define PHYS_SECTION_UNASSIGNED 0
-#define PHYS_SECTION_NOTDIRTY 1
-#define PHYS_SECTION_ROM 2
 
 static void io_mem_init(void);
 static void memory_map_init(void);
@@ -1015,7 +1012,7 @@ const char *parse_cpu_option(const char *cpu_option)
 void tb_invalidate_phys_addr(target_ulong addr)
 {
     mmap_lock();
-    tb_invalidate_phys_page_range(addr, addr + 1, 0);
+    tb_invalidate_phys_page_range(addr, addr + 1);
     mmap_unlock();
 }
 
@@ -1042,7 +1039,7 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
         return;
     }
     ram_addr = memory_region_get_ram_addr(mr) + addr;
-    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
     rcu_read_unlock();
 }
 
@@ -1462,31 +1459,10 @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
 
 /* Called from RCU critical section */
 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
-                                       MemoryRegionSection *section,
-                                       target_ulong vaddr,
-                                       hwaddr paddr, hwaddr xlat,
-                                       int prot,
-                                       target_ulong *address)
+                                       MemoryRegionSection *section)
 {
-    hwaddr iotlb;
-
-    if (memory_region_is_ram(section->mr)) {
-        /* Normal RAM. */
-        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
-        if (!section->readonly) {
-            iotlb |= PHYS_SECTION_NOTDIRTY;
-        } else {
-            iotlb |= PHYS_SECTION_ROM;
-        }
-    } else {
-        AddressSpaceDispatch *d;
-
-        d = flatview_to_dispatch(section->fv);
-        iotlb = section - d->map.sections;
-        iotlb += xlat;
-    }
-
-    return iotlb;
+    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
+    return section - d->map.sections;
 }
 #endif /* defined(CONFIG_USER_ONLY) */
 
@@ -2742,83 +2718,6 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
     return block->offset + offset;
 }
 
-/* Called within RCU critical section. */
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
-                                   CPUState *cpu,
-                                   vaddr mem_vaddr,
-                                   ram_addr_t ram_addr,
-                                   unsigned size)
-{
-    ndi->cpu = cpu;
-    ndi->ram_addr = ram_addr;
-    ndi->mem_vaddr = mem_vaddr;
-    ndi->size = size;
-    ndi->pages = NULL;
-
-    assert(tcg_enabled());
-    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
-        ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
-        tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
-    }
-}
-
-/* Called within RCU critical section. */
-void memory_notdirty_write_complete(NotDirtyInfo *ndi)
-{
-    if (ndi->pages) {
-        assert(tcg_enabled());
-        page_collection_unlock(ndi->pages);
-        ndi->pages = NULL;
-    }
-
-    /* Set both VGA and migration bits for simplicity and to remove
-     * the notdirty callback faster.
-     */
-    cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
-                                        DIRTY_CLIENTS_NOCODE);
-    /* we remove the notdirty callback only if the code has been
-       flushed */
-    if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
-        tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
-    }
-}
-
-/* Called within RCU critical section. */
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
-                               uint64_t val, unsigned size)
-{
-    NotDirtyInfo ndi;
-
-    memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
-                                  ram_addr, size);
-
-    stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
-    memory_notdirty_write_complete(&ndi);
-}
-
-static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
-                                 unsigned size, bool is_write,
-                                 MemTxAttrs attrs)
-{
-    return is_write;
-}
-
-static const MemoryRegionOps notdirty_mem_ops = {
-    .write = notdirty_mem_write,
-    .valid.accepts = notdirty_mem_accepts,
-    .endianness = DEVICE_NATIVE_ENDIAN,
-    .valid = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-    .impl = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-};
-
 /* Generate a debug exception if a watchpoint has been hit. */
 void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                           MemTxAttrs attrs, int flags, uintptr_t ra)
@@ -2859,7 +2758,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
             cpu->watchpoint_hit = wp;
 
             mmap_lock();
-            tb_check_watchpoint(cpu);
+            tb_check_watchpoint(cpu, ra);
             if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                 cpu->exception_index = EXCP_DEBUG;
                 mmap_unlock();
@@ -2999,38 +2898,6 @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
     return phys_section_add(map, &section);
 }
 
-static void readonly_mem_write(void *opaque, hwaddr addr,
-                               uint64_t val, unsigned size)
-{
-    /* Ignore any write to ROM. */
-}
-
-static bool readonly_mem_accepts(void *opaque, hwaddr addr,
-                                 unsigned size, bool is_write,
-                                 MemTxAttrs attrs)
-{
-    return is_write;
-}
-
-/* This will only be used for writes, because reads are special cased
- * to directly access the underlying host ram.
- */
-static const MemoryRegionOps readonly_mem_ops = {
-    .write = readonly_mem_write,
-    .valid.accepts = readonly_mem_accepts,
-    .endianness = DEVICE_NATIVE_ENDIAN,
-    .valid = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-    .impl = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-};
-
 MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                       hwaddr index, MemTxAttrs attrs)
 {
@@ -3044,17 +2911,8 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
 
 static void io_mem_init(void)
 {
-    memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
-                          NULL, NULL, UINT64_MAX);
     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                           NULL, UINT64_MAX);
-
-    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
-     * which can be called without the iothread mutex.
-     */
-    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
-                          NULL, UINT64_MAX);
-    memory_region_clear_global_locking(&io_mem_notdirty);
 }
 
 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
@@ -3064,10 +2922,6 @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
 
     n = dummy_section(&d->map, fv, &io_mem_unassigned);
     assert(n == PHYS_SECTION_UNASSIGNED);
-    n = dummy_section(&d->map, fv, &io_mem_notdirty);
-    assert(n == PHYS_SECTION_NOTDIRTY);
-    n = dummy_section(&d->map, fv, &io_mem_rom);
-    assert(n == PHYS_SECTION_ROM);
 
     d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
 
hw/core/cpu.c

@@ -261,7 +261,6 @@ static void cpu_common_reset(CPUState *cpu)
     cpu->interrupt_request = 0;
     cpu->halted = 0;
     cpu->mem_io_pc = 0;
-    cpu->mem_io_vaddr = 0;
     cpu->icount_extra = 0;
     atomic_set(&cpu->icount_decr_ptr->u32, 0);
     cpu->can_do_io = 1;
include/exec/cpu-all.h

@@ -317,26 +317,35 @@ CPUArchState *cpu_copy(CPUArchState *env);
 
 #if !defined(CONFIG_USER_ONLY)
 
-/* Flags stored in the low bits of the TLB virtual address. These are
- * defined so that fast path ram access is all zeros.
+/*
+ * Flags stored in the low bits of the TLB virtual address.
+ * These are defined so that fast path ram access is all zeros.
  * The flags all must be between TARGET_PAGE_BITS and
  * maximum address alignment bit.
+ *
+ * Use TARGET_PAGE_BITS_MIN so that these bits are constant
+ * when TARGET_PAGE_BITS_VARY is in effect.
  */
 /* Zero if TLB entry is valid. */
-#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS - 1))
+#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
 /* Set if TLB entry references a clean RAM page. The iotlb entry will
    contain the page physical address. */
-#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS - 2))
+#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
 /* Set if TLB entry is an IO callback. */
-#define TLB_MMIO            (1 << (TARGET_PAGE_BITS - 3))
+#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
 /* Set if TLB entry contains a watchpoint. */
-#define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS - 4))
+#define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
+/* Set if TLB entry writes ignored. */
+#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 6))
 
 /* Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
 #define TLB_FLAGS_MASK \
-    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
+    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
+     | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
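With six flags now squeezed under the page bits, it may help to state the invariant explicitly. The build-time check below is illustrative only (it is not part of the patch); it uses the existing QEMU_BUILD_BUG_ON macro.

/* Illustrative only: every TLB flag must sit below TARGET_PAGE_BITS_MIN,
 * otherwise the flag bits would overlap the page number that the TLB
 * fast path compares against. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & ~((1 << TARGET_PAGE_BITS_MIN) - 1));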
include/exec/cpu-common.h

@@ -100,9 +100,6 @@ void qemu_flush_coalesced_mmio_buffer(void);
 
 void cpu_flush_icache_range(hwaddr start, hwaddr len);
 
-extern struct MemoryRegion io_mem_rom;
-extern struct MemoryRegion io_mem_notdirty;
-
 typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
 
 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
include/exec/exec-all.h

@@ -509,11 +509,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                   hwaddr *xlat, hwaddr *plen,
                                   MemTxAttrs attrs, int *prot);
 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
-                                       MemoryRegionSection *section,
-                                       target_ulong vaddr,
-                                       hwaddr paddr, hwaddr xlat,
-                                       int prot,
-                                       target_ulong *address);
+                                       MemoryRegionSection *section);
 #endif
 
 /* vl.c */
include/exec/memory-internal.h

@@ -49,70 +49,5 @@ void address_space_dispatch_free(AddressSpaceDispatch *d);
 
 void mtree_print_dispatch(struct AddressSpaceDispatch *d,
                           MemoryRegion *root);
 
-struct page_collection;
-
-/* Opaque struct for passing info from memory_notdirty_write_prepare()
- * to memory_notdirty_write_complete(). Callers should treat all fields
- * as private, with the exception of @active.
- *
- * @active is a field which is not touched by either the prepare or
- * complete functions, but which the caller can use if it wishes to
- * track whether it has called prepare for this struct and so needs
- * to later call the complete function.
- */
-typedef struct {
-    CPUState *cpu;
-    struct page_collection *pages;
-    ram_addr_t ram_addr;
-    vaddr mem_vaddr;
-    unsigned size;
-    bool active;
-} NotDirtyInfo;
-
-/**
- * memory_notdirty_write_prepare: call before writing to non-dirty memory
- * @ndi: pointer to opaque NotDirtyInfo struct
- * @cpu: CPU doing the write
- * @mem_vaddr: virtual address of write
- * @ram_addr: the ram address of the write
- * @size: size of write in bytes
- *
- * Any code which writes to the host memory corresponding to
- * guest RAM which has been marked as NOTDIRTY must wrap those
- * writes in calls to memory_notdirty_write_prepare() and
- * memory_notdirty_write_complete():
- *
- *  NotDirtyInfo ndi;
- *  memory_notdirty_write_prepare(&ndi, ....);
- *  ... perform write here ...
- *  memory_notdirty_write_complete(&ndi);
- *
- * These calls will ensure that we flush any TCG translated code for
- * the memory being written, update the dirty bits and (if possible)
- * remove the slowpath callback for writing to the memory.
- *
- * This must only be called if we are using TCG; it will assert otherwise.
- *
- * We may take locks in the prepare call, so callers must ensure that
- * they don't exit (via longjump or otherwise) without calling complete.
- *
- * This call must only be made inside an RCU critical section.
- * (Note that while we're executing a TCG TB we're always in an
- * RCU critical section, which is likely to be the case for callers
- * of these functions.)
- */
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
-                                   CPUState *cpu,
-                                   vaddr mem_vaddr,
-                                   ram_addr_t ram_addr,
-                                   unsigned size);
-/**
- * memory_notdirty_write_complete: finish write to non-dirty memory
- * @ndi: pointer to the opaque NotDirtyInfo struct which was initialized
- * by memory_not_dirty_write_prepare().
- */
-void memory_notdirty_write_complete(NotDirtyInfo *ndi);
-
 #endif
 #endif
include/hw/core/cpu.h

@@ -338,7 +338,6 @@ struct qemu_work_item;
 * @next_cpu: Next CPU sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
- * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 * @queued_work_first: First asynchronous work pending.
@@ -413,7 +412,6 @@
      * we store some rarely used information in the CPU context.
      */
     uintptr_t mem_io_pc;
-    vaddr mem_io_vaddr;
     /*
      * This is only needed for the legacy cpu_unassigned_access() hook;
      * when all targets using it have been converted to use
include/qemu/compiler.h

@@ -170,6 +170,17 @@
 # define QEMU_NONSTRING
 #endif
 
+/*
+ * Forced inlining may be desired to encourage constant propagation
+ * of function parameters. However, it can also make debugging harder,
+ * so disable it for a non-optimizing build.
+ */
+#if defined(__OPTIMIZE__)
+#define QEMU_ALWAYS_INLINE  __attribute__((always_inline))
+#else
+#define QEMU_ALWAYS_INLINE
+#endif
+
 /* Implement C11 _Generic via GCC builtins. Example:
  *
  *    QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
@@ -210,4 +221,19 @@
 #define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
 #define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
 
+/**
+ * qemu_build_not_reached()
+ *
+ * The compiler, during optimization, is expected to prove that a call
+ * to this function cannot be reached and remove it. If the compiler
+ * supports QEMU_ERROR, this will be reported at compile time; otherwise
+ * this will be reported at link time due to the missing symbol.
+ */
+#ifdef __OPTIMIZE__
+extern void QEMU_NORETURN QEMU_ERROR("code path is reachable")
+    qemu_build_not_reached(void);
+#else
+#define qemu_build_not_reached()  g_assert_not_reached()
+#endif
+
 #endif /* COMPILER_H */
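A hedged usage sketch of qemu_build_not_reached(), mirroring how load_memop()/store_memop() use it above. The function below is made up for illustration and assumes every caller passes a compile-time-constant width.

/* Illustrative only: returns the mask of bits above the given width.  With a
 * constant 'bits' at each call site and forced inlining, the optimizer proves
 * the default branch unreachable and removes the call; if it cannot, the
 * build fails (QEMU_ERROR) or the link fails on the missing symbol.  Without
 * optimization it degrades to g_assert_not_reached(). */
static inline uint64_t QEMU_ALWAYS_INLINE
high_bits_mask_sketch(unsigned bits)
{
    switch (bits) {
    case 8:
        return 0xffffffffffffff00ull;
    case 16:
        return 0xffffffffffff0000ull;
    case 32:
        return 0xffffffff00000000ull;
    default:
        qemu_build_not_reached();
    }
}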
memory.c (20 changed lines)

@@ -434,11 +434,6 @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
     tmp = mr->ops->read(mr->opaque, addr, size);
     if (mr->subpage) {
         trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
-    } else if (mr == &io_mem_notdirty) {
-        /* Accesses to code which has previously been translated into a TB show
-         * up in the MMIO path, as accesses to the io_mem_notdirty
-         * MemoryRegion. */
-        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
@@ -461,11 +456,6 @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
     r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
     if (mr->subpage) {
         trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
-    } else if (mr == &io_mem_notdirty) {
-        /* Accesses to code which has previously been translated into a TB show
-         * up in the MMIO path, as accesses to the io_mem_notdirty
-         * MemoryRegion. */
-        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
@@ -486,11 +476,6 @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
 
     if (mr->subpage) {
         trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
-    } else if (mr == &io_mem_notdirty) {
-        /* Accesses to code which has previously been translated into a TB show
-         * up in the MMIO path, as accesses to the io_mem_notdirty
-         * MemoryRegion. */
-        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
@@ -511,11 +496,6 @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
 
     if (mr->subpage) {
         trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
-    } else if (mr == &io_mem_notdirty) {
-        /* Accesses to code which has previously been translated into a TB show
-         * up in the MMIO path, as accesses to the io_mem_notdirty
-         * MemoryRegion. */
-        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
trace-events

@@ -52,14 +52,14 @@ dma_map_wait(void *dbs) "dbs=%p"
 find_ram_offset(uint64_t size, uint64_t offset) "size: 0x%" PRIx64 " @ 0x%" PRIx64
 find_ram_offset_loop(uint64_t size, uint64_t candidate, uint64_t offset, uint64_t next, uint64_t mingap) "trying size: 0x%" PRIx64 " @ 0x%" PRIx64 ", offset: 0x%" PRIx64" next: 0x%" PRIx64 " mingap: 0x%" PRIx64
 ram_block_discard_range(const char *rbname, void *hva, size_t length, bool need_madvise, bool need_fallocate, int ret) "%s@%p + 0x%zx: madvise: %d fallocate: %d ret: %d"
+memory_notdirty_write_access(uint64_t vaddr, uint64_t ram_addr, unsigned size) "0x%" PRIx64 " ram_addr 0x%" PRIx64 " size %u"
+memory_notdirty_set_dirty(uint64_t vaddr) "0x%" PRIx64
 
 # memory.c
 memory_region_ops_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_ops_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_subpage_read(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_subpage_write(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
-memory_region_tb_read(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
-memory_region_tb_write(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_ram_device_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 flatview_new(void *view, void *root) "%p (root %p)"