Allow page table bit to swap endianness.
Reorganize watchpoints out of i/o path.
Return host address from probe_write / probe_access.

-----BEGIN PGP SIGNATURE-----

iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAl1uiyYdHHJpY2hhcmQu
aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8AuwgAnYLQQbL8kjSqzp7q
gRlj0M2SX41ZW3fMkI794RwsljD9Z0QS7YGnpzHolig9XUYrGnip7STrMvlCr/1L
CIMWNHlgitgBMszLqg42/TB+6RxXn+DMX/ShUzTagC6xQhinCIpdEjoLaTKSgeP+
foIyJ2uoJLKOBP8cPTQp8evongtoQIljpsZZ0K8a4sreO1d6ytH+olkuoGiROft+
VoJkA+kNHd9cE+LPCva8UFGu1QE6uCySvhepzOpnvOtK+SXKUm2yLOFGu7RWP1pT
RkE0oRyRnImtg+cViHfUUFogIffFROdL5tuYMQVuqbINeROPUgJPav+R1Nz1P60a
xM2HEw==
=bLLU
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190903' into staging

Allow page table bit to swap endianness.
Reorganize watchpoints out of i/o path.
Return host address from probe_write / probe_access.

# gpg: Signature made Tue 03 Sep 2019 16:47:50 BST
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190903: (36 commits)
  tcg: Factor out probe_write() logic into probe_access()
  tcg: Make probe_write() return a pointer to the host page
  s390x/tcg: Pass a size to probe_write() in do_csst()
  hppa/tcg: Call probe_write() also for CONFIG_USER_ONLY
  mips/tcg: Call probe_write() for CONFIG_USER_ONLY as well
  tcg: Enforce single page access in probe_write()
  tcg: Factor out CONFIG_USER_ONLY probe_write() from s390x code
  s390x/tcg: Fix length calculation in probe_write_access()
  s390x/tcg: Use guest_addr_valid() instead of h2g_valid() in probe_write_access()
  tcg: Check for watchpoints in probe_write()
  cputlb: Handle watchpoints via TLB_WATCHPOINT
  cputlb: Remove double-alignment in store_helper
  cputlb: Fix size operand for tlb_fill on unaligned store
  exec: Factor out cpu_watchpoint_address_matches
  cputlb: Fold TLB_RECHECK into TLB_INVALID_MASK
  exec: Factor out core logic of check_watchpoint()
  exec: Move user-only watchpoint stubs inline
  target/sparc: sun4u Invert Endian TTE bit
  target/sparc: Add TLB entry with attributes
  cputlb: Byte swap memory transaction attribute
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 9de65783e1
57 changed files with 918 additions and 865 deletions
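
Before the diff itself, a note on the headline API change: probe_write() has been generalized into probe_access(), which validates the whole access up front (faults, watchpoints) and, for ordinary RAM, returns a pointer into the host mapping. The sketch below shows the intended caller pattern; helper_fill() is a hypothetical target helper invented for illustration (only probe_access(), GETPC(), cpu_mmu_index() and cpu_stb_data_ra() are existing interfaces), and it assumes the range does not cross a guest page, since probe_access() asserts single-page accesses.

    /* Hypothetical target helper: zero LEN guest bytes at ADDR (LEN within one page). */
    void helper_fill(CPUArchState *env, target_ulong addr, uint32_t len)
    {
        uintptr_t ra = GETPC();
        /* Any fault or watchpoint is raised here, before any byte is written. */
        void *host = probe_access(env, addr, len, MMU_DATA_STORE,
                                  cpu_mmu_index(env, false), ra);

        if (host) {
            /* Plain RAM: operate directly on the host mapping. */
            memset(host, 0, len);
        } else {
            /* NULL: len == 0 or the page needs the slow path (e.g. MMIO). */
            for (uint32_t i = 0; i < len; i++) {
                cpu_stb_data_ra(env, addr + i, 0, ra);
            }
        }
    }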
@@ -710,6 +710,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;

    assert_cpu_is_self(cpu);
@@ -732,11 +733,12 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
        /* Repeat the MMU check and TLB fill on every access. */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        /* Force the access through the I/O slow path. */
        address |= TLB_MMIO;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
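
The attrs.byte_swap test added above is what lets a page-table bit swap endianness: a target marks the page's MemTxAttrs, and cputlb pushes such pages onto the I/O slow path (TLB_MMIO), where io_readx()/io_writex() toggle MO_BSWAP on the MemOp. A rough sketch of the target side follows; demo_install_swapped_page() and the tte_invert_endian flag are invented for illustration, while tlb_set_page_with_attrs() and the byte_swap attribute are the interfaces this series actually uses (see the sun4u Invert Endian TTE bit patch in the shortlog).

    /* Illustrative only: install a TLB entry whose accesses are byte swapped. */
    static void demo_install_swapped_page(CPUState *cs, target_ulong vaddr,
                                          hwaddr paddr, int prot, int mmu_idx,
                                          bool tte_invert_endian)
    {
        MemTxAttrs attrs = {};

        if (tte_invert_endian) {
            attrs.byte_swap = 1;  /* forces TLB_MMIO; swapped in io_readx/io_writex */
        }
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot,
                                mmu_idx, TARGET_PAGE_SIZE);
    }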
@@ -751,6 +753,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);
    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);
@@ -804,6 +808,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }
@@ -830,6 +837,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
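
The TLB_WATCHPOINT bits set above only matter once a watchpoint exists. For orientation, a sketch of the registration side using the existing cpu_watchpoint_insert() API; demo_set_watch() is illustrative (the gdbstub is the usual caller), and it is subsequent TLB fills for pages overlapping the watchpoint that pick up the flag.

    /* Illustrative: register a 4-byte write watchpoint at guest address ADDR. */
    static int demo_set_watch(CPUState *cs, vaddr addr)
    {
        CPUWatchpoint *wp = NULL;

        return cpu_watchpoint_insert(cs, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
    }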
@@ -881,7 +891,7 @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size,

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, int size)
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;

@@ -891,6 +901,10 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
    bool locked = false;
    MemTxResult r;

    if (iotlbentry->attrs.byte_swap) {
        op ^= MO_BSWAP;
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
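
Because MemOp carries size and endianness in one value, flipping the effective endianness of a byte-swapped page is a single XOR with MO_BSWAP, as the hunk above does. A small sketch of the identities involved, assuming the usual QEMU includes; demo_memop_swap() exists only to host the assertions.

    /* Sketch: MO_BSWAP toggles endianness without touching the size bits. */
    static void demo_memop_swap(void)
    {
        MemOp op = MO_BEUW;           /* 16-bit big-endian access */

        op ^= MO_BSWAP;               /* what io_readx/io_writex do for byte_swap pages */
        assert(op == MO_LEUW);        /* endianness flipped */
        assert(memop_size(op) == 2);  /* size unchanged: still 2 bytes */
    }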
@ -906,14 +920,13 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
|
|||
qemu_mutex_lock_iothread();
|
||||
locked = true;
|
||||
}
|
||||
r = memory_region_dispatch_read(mr, mr_offset,
|
||||
&val, size, iotlbentry->attrs);
|
||||
r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
|
||||
if (r != MEMTX_OK) {
|
||||
hwaddr physaddr = mr_offset +
|
||||
section->offset_within_address_space -
|
||||
section->offset_within_region;
|
||||
|
||||
cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
|
||||
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
|
||||
mmu_idx, iotlbentry->attrs, r, retaddr);
|
||||
}
|
||||
if (locked) {
|
||||
|
@ -925,7 +938,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
|
|||
|
||||
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
|
||||
int mmu_idx, uint64_t val, target_ulong addr,
|
||||
uintptr_t retaddr, int size)
|
||||
uintptr_t retaddr, MemOp op)
|
||||
{
|
||||
CPUState *cpu = env_cpu(env);
|
||||
hwaddr mr_offset;
|
||||
|
@ -934,6 +947,10 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
|
|||
bool locked = false;
|
||||
MemTxResult r;
|
||||
|
||||
if (iotlbentry->attrs.byte_swap) {
|
||||
op ^= MO_BSWAP;
|
||||
}
|
||||
|
||||
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
|
||||
mr = section->mr;
|
||||
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
|
||||
|
@ -947,15 +964,15 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
|
|||
qemu_mutex_lock_iothread();
|
||||
locked = true;
|
||||
}
|
||||
r = memory_region_dispatch_write(mr, mr_offset,
|
||||
val, size, iotlbentry->attrs);
|
||||
r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
|
||||
if (r != MEMTX_OK) {
|
||||
hwaddr physaddr = mr_offset +
|
||||
section->offset_within_address_space -
|
||||
section->offset_within_region;
|
||||
|
||||
cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
|
||||
mmu_idx, iotlbentry->attrs, r, retaddr);
|
||||
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
|
||||
MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
|
||||
retaddr);
|
||||
}
|
||||
if (locked) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
|
@@ -1015,10 +1032,15 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM. This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
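
The rewritten comment above pins down the caller contract: -1 means "cannot execute out of a full page of RAM", so the translator must fetch and translate one instruction at a time without caching. A sketch of that caller-side pattern; demo_lookup() is illustrative and not part of this patch (the real consumer is the TB lookup/generation path).

    /* Illustrative caller of get_page_addr_code(). */
    static void demo_lookup(CPUArchState *env, target_ulong pc)
    {
        tb_page_addr_t phys_pc = get_page_addr_code(env, pc);

        if (phys_pc == -1) {
            /* MMIO or sub-page protection: translate one insn at a time,
             * and do not cache the resulting TB. */
        } else {
            /* phys_pc is a ram_addr_t, usable as a TB lookup key. */
        }
    }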
@ -1032,19 +1054,20 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
|
|||
tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
|
||||
index = tlb_index(env, mmu_idx, addr);
|
||||
entry = tlb_entry(env, mmu_idx, addr);
|
||||
|
||||
if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
|
||||
/*
|
||||
* The MMU protection covers a smaller range than a target
|
||||
* page, so we must redo the MMU check for every insn.
|
||||
*/
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
assert(tlb_hit(entry->addr_code, addr));
|
||||
}
|
||||
|
||||
if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
|
||||
/*
|
||||
* Return -1 if we can't translate and execute from an entire
|
||||
* page of RAM here, which will cause us to execute by loading
|
||||
* and translating one insn at a time, without caching:
|
||||
* - TLB_RECHECK: means the MMU protection covers a smaller range
|
||||
* than a target page, so we must redo the MMU check every insn
|
||||
* - TLB_MMIO: region is not backed by RAM
|
||||
*/
|
||||
if (unlikely(entry->addr_code & TLB_MMIO)) {
|
||||
/* The region is not backed by RAM. */
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@@ -1052,25 +1075,70 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
/*
 * Probe for whether the specified guest access is permitted. If it is not
 * permitted then an exception will be taken in the same way as if this
 * were a real access (and we will not return).
 * If the size is 0 or the page requires I/O access, returns NULL; otherwise,
 * returns the address of the host page similar to tlb_vaddr_to_host().
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr;
    size_t elt_ofs;
    int wp_access;

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        wp_access = BP_MEM_READ;
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        wp_access = BP_MEM_WRITE;
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        wp_access = BP_MEM_READ;
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    if (unlikely(!tlb_hit(tlb_addr, addr))) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr);
            /* TLB resize via tlb_fill may have moved the entry. */
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }

    if (!size) {
        return NULL;
    }

    /* Handle watchpoints. */
    if (tlb_addr & TLB_WATCHPOINT) {
        cpu_check_watchpoint(env_cpu(env), addr, size,
                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                             wp_access, retaddr);
    }

    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) {
        /* I/O access */
        return NULL;
    }

    return (void *)((uintptr_t)addr + entry->addend);
}

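Note the size == 0 case spelled out in the new doc-comment: probe_access() can also be used purely as a permission/fault check, with the returned pointer ignored. A minimal sketch; demo_check_dest() is invented for illustration and len is assumed to stay within one guest page.

    /* Illustrative: fault in / validate a destination range before a complex op. */
    static void demo_check_dest(CPUArchState *env, target_ulong dest,
                                int len, int mmu_idx, uintptr_t ra)
    {
        /* Either returns (access permitted, TLB entry valid, watchpoints
         * reported) or raises the guest exception and does not return. */
        probe_access(env, dest, len, MMU_DATA_STORE, mmu_idx, ra);
    }
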
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
|
||||
|
@ -1133,7 +1201,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
|
|||
uintptr_t index = tlb_index(env, mmu_idx, addr);
|
||||
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
|
||||
target_ulong tlb_addr = tlb_addr_write(tlbe);
|
||||
TCGMemOp mop = get_memop(oi);
|
||||
MemOp mop = get_memop(oi);
|
||||
int a_bits = get_alignment_bits(mop);
|
||||
int s_bits = mop & MO_SIZE;
|
||||
void *hostaddr;
|
||||
|
@ -1169,7 +1237,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
|
|||
}
|
||||
|
||||
/* Notice an IO access or a needs-MMU-lookup access */
|
||||
if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
|
||||
if (unlikely(tlb_addr & TLB_MMIO)) {
|
||||
/* There's really nothing that can be done to
|
||||
support this apart from stop-the-world. */
|
||||
goto stop_the_world;
|
||||
|
@ -1201,37 +1269,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
|
|||
cpu_loop_exit_atomic(env_cpu(env), retaddr);
|
||||
}
|
||||
|
||||
#ifdef TARGET_WORDS_BIGENDIAN
|
||||
#define NEED_BE_BSWAP 0
|
||||
#define NEED_LE_BSWAP 1
|
||||
#else
|
||||
#define NEED_BE_BSWAP 1
|
||||
#define NEED_LE_BSWAP 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Byte Swap Helper
|
||||
*
|
||||
* This should all dead code away depending on the build host and
|
||||
* access type.
|
||||
*/
|
||||
|
||||
static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
|
||||
{
|
||||
if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
|
||||
switch (size) {
|
||||
case 1: return val;
|
||||
case 2: return bswap16(val);
|
||||
case 4: return bswap32(val);
|
||||
case 8: return bswap64(val);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
} else {
|
||||
return val;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Load Helpers
|
||||
*
|
||||
|
@ -1246,7 +1283,7 @@ typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
|
|||
|
||||
static inline uint64_t __attribute__((always_inline))
|
||||
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
|
||||
uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
|
||||
uintptr_t retaddr, MemOp op, bool code_read,
|
||||
FullLoadHelper *full_load)
|
||||
{
|
||||
uintptr_t mmu_idx = get_mmuidx(oi);
|
||||
|
@ -1260,6 +1297,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
|
|||
unsigned a_bits = get_alignment_bits(get_memop(oi));
|
||||
void *haddr;
|
||||
uint64_t res;
|
||||
size_t size = memop_size(op);
|
||||
|
||||
/* Handle CPU specific unaligned behaviour */
|
||||
if (addr & ((1 << a_bits) - 1)) {
|
||||
|
@ -1277,37 +1315,36 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
|
|||
entry = tlb_entry(env, mmu_idx, addr);
|
||||
}
|
||||
tlb_addr = code_read ? entry->addr_code : entry->addr_read;
|
||||
tlb_addr &= ~TLB_INVALID_MASK;
|
||||
}
|
||||
|
||||
/* Handle an IO access. */
|
||||
/* Handle anything that isn't just a straight memory access. */
|
||||
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
|
||||
CPUIOTLBEntry *iotlbentry;
|
||||
|
||||
/* For anything that is unaligned, recurse through full_load. */
|
||||
if ((addr & (size - 1)) != 0) {
|
||||
goto do_unaligned_access;
|
||||
}
|
||||
|
||||
if (tlb_addr & TLB_RECHECK) {
|
||||
/*
|
||||
* This is a TLB_RECHECK access, where the MMU protection
|
||||
* covers a smaller range than a target page, and we must
|
||||
* repeat the MMU check here. This tlb_fill() call might
|
||||
* longjump out if this access should cause a guest exception.
|
||||
*/
|
||||
tlb_fill(env_cpu(env), addr, size,
|
||||
access_type, mmu_idx, retaddr);
|
||||
index = tlb_index(env, mmu_idx, addr);
|
||||
entry = tlb_entry(env, mmu_idx, addr);
|
||||
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
|
||||
|
||||
tlb_addr = code_read ? entry->addr_code : entry->addr_read;
|
||||
tlb_addr &= ~TLB_RECHECK;
|
||||
if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
|
||||
/* RAM access */
|
||||
/* Handle watchpoints. */
|
||||
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
|
||||
/* On watchpoint hit, this will longjmp out. */
|
||||
cpu_check_watchpoint(env_cpu(env), addr, size,
|
||||
iotlbentry->attrs, BP_MEM_READ, retaddr);
|
||||
|
||||
/* The backing page may or may not require I/O. */
|
||||
tlb_addr &= ~TLB_WATCHPOINT;
|
||||
if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
|
||||
goto do_aligned_access;
|
||||
}
|
||||
}
|
||||
|
||||
res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
|
||||
mmu_idx, addr, retaddr, access_type, size);
|
||||
return handle_bswap(res, size, big_endian);
|
||||
/* Handle I/O access. */
|
||||
return io_readx(env, iotlbentry, mmu_idx, addr,
|
||||
retaddr, access_type, op);
|
||||
}
|
||||
|
||||
/* Handle slow unaligned access (it spans two pages or IO). */
|
||||
|
@ -1324,7 +1361,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
|
|||
r2 = full_load(env, addr2, oi, retaddr);
|
||||
shift = (addr & (size - 1)) * 8;
|
||||
|
||||
if (big_endian) {
|
||||
if (memop_big_endian(op)) {
|
||||
/* Big-endian combine. */
|
||||
res = (r1 << shift) | (r2 >> ((size * 8) - shift));
|
||||
} else {
|
||||
|
@ -1336,30 +1373,27 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
|
|||
|
||||
do_aligned_access:
|
||||
haddr = (void *)((uintptr_t)addr + entry->addend);
|
||||
switch (size) {
|
||||
case 1:
|
||||
switch (op) {
|
||||
case MO_UB:
|
||||
res = ldub_p(haddr);
|
||||
break;
|
||||
case 2:
|
||||
if (big_endian) {
|
||||
res = lduw_be_p(haddr);
|
||||
} else {
|
||||
res = lduw_le_p(haddr);
|
||||
}
|
||||
case MO_BEUW:
|
||||
res = lduw_be_p(haddr);
|
||||
break;
|
||||
case 4:
|
||||
if (big_endian) {
|
||||
res = (uint32_t)ldl_be_p(haddr);
|
||||
} else {
|
||||
res = (uint32_t)ldl_le_p(haddr);
|
||||
}
|
||||
case MO_LEUW:
|
||||
res = lduw_le_p(haddr);
|
||||
break;
|
||||
case 8:
|
||||
if (big_endian) {
|
||||
res = ldq_be_p(haddr);
|
||||
} else {
|
||||
res = ldq_le_p(haddr);
|
||||
}
|
||||
case MO_BEUL:
|
||||
res = (uint32_t)ldl_be_p(haddr);
|
||||
break;
|
||||
case MO_LEUL:
|
||||
res = (uint32_t)ldl_le_p(haddr);
|
||||
break;
|
||||
case MO_BEQ:
|
||||
res = ldq_be_p(haddr);
|
||||
break;
|
||||
case MO_LEQ:
|
||||
res = ldq_le_p(haddr);
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
|
@ -1381,8 +1415,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
|
|||
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 1, false, false,
|
||||
full_ldub_mmu);
|
||||
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
|
||||
|
@ -1394,7 +1427,7 @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
|
|||
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 2, false, false,
|
||||
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
|
||||
full_le_lduw_mmu);
|
||||
}
|
||||
|
||||
|
@ -1407,7 +1440,7 @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
|
|||
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 2, true, false,
|
||||
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
|
||||
full_be_lduw_mmu);
|
||||
}
|
||||
|
||||
|
@ -1420,7 +1453,7 @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
|
|||
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 4, false, false,
|
||||
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
|
||||
full_le_ldul_mmu);
|
||||
}
|
||||
|
||||
|
@ -1433,7 +1466,7 @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
|
|||
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 4, true, false,
|
||||
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
|
||||
full_be_ldul_mmu);
|
||||
}
|
||||
|
||||
|
@ -1446,14 +1479,14 @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
|
|||
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 8, false, false,
|
||||
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
|
||||
helper_le_ldq_mmu);
|
||||
}
|
||||
|
||||
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 8, true, false,
|
||||
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
|
||||
helper_be_ldq_mmu);
|
||||
}
|
||||
|
||||
|
@ -1499,7 +1532,7 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
|
|||
|
||||
static inline void __attribute__((always_inline))
|
||||
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
|
||||
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
|
||||
{
|
||||
uintptr_t mmu_idx = get_mmuidx(oi);
|
||||
uintptr_t index = tlb_index(env, mmu_idx, addr);
|
||||
|
@ -1508,6 +1541,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
|||
const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
|
||||
unsigned a_bits = get_alignment_bits(get_memop(oi));
|
||||
void *haddr;
|
||||
size_t size = memop_size(op);
|
||||
|
||||
/* Handle CPU specific unaligned behaviour */
|
||||
if (addr & ((1 << a_bits) - 1)) {
|
||||
|
@ -1527,35 +1561,32 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
|||
tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
|
||||
}
|
||||
|
||||
/* Handle an IO access. */
|
||||
/* Handle anything that isn't just a straight memory access. */
|
||||
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
|
||||
CPUIOTLBEntry *iotlbentry;
|
||||
|
||||
/* For anything that is unaligned, recurse through byte stores. */
|
||||
if ((addr & (size - 1)) != 0) {
|
||||
goto do_unaligned_access;
|
||||
}
|
||||
|
||||
if (tlb_addr & TLB_RECHECK) {
|
||||
/*
|
||||
* This is a TLB_RECHECK access, where the MMU protection
|
||||
* covers a smaller range than a target page, and we must
|
||||
* repeat the MMU check here. This tlb_fill() call might
|
||||
* longjump out if this access should cause a guest exception.
|
||||
*/
|
||||
tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
|
||||
mmu_idx, retaddr);
|
||||
index = tlb_index(env, mmu_idx, addr);
|
||||
entry = tlb_entry(env, mmu_idx, addr);
|
||||
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
|
||||
|
||||
tlb_addr = tlb_addr_write(entry);
|
||||
tlb_addr &= ~TLB_RECHECK;
|
||||
if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
|
||||
/* RAM access */
|
||||
/* Handle watchpoints. */
|
||||
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
|
||||
/* On watchpoint hit, this will longjmp out. */
|
||||
cpu_check_watchpoint(env_cpu(env), addr, size,
|
||||
iotlbentry->attrs, BP_MEM_WRITE, retaddr);
|
||||
|
||||
/* The backing page may or may not require I/O. */
|
||||
tlb_addr &= ~TLB_WATCHPOINT;
|
||||
if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
|
||||
goto do_aligned_access;
|
||||
}
|
||||
}
|
||||
|
||||
io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
|
||||
handle_bswap(val, size, big_endian),
|
||||
addr, retaddr, size);
|
||||
/* Handle I/O access. */
|
||||
io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1567,6 +1598,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
|||
uintptr_t index2;
|
||||
CPUTLBEntry *entry2;
|
||||
target_ulong page2, tlb_addr2;
|
||||
size_t size2;
|
||||
|
||||
do_unaligned_access:
|
||||
/*
|
||||
* Ensure the second page is in the TLB. Note that the first page
|
||||
|
@ -1574,14 +1607,33 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
|||
* cannot evict the first.
|
||||
*/
|
||||
page2 = (addr + size) & TARGET_PAGE_MASK;
|
||||
size2 = (addr + size) & ~TARGET_PAGE_MASK;
|
||||
index2 = tlb_index(env, mmu_idx, page2);
|
||||
entry2 = tlb_entry(env, mmu_idx, page2);
|
||||
tlb_addr2 = tlb_addr_write(entry2);
|
||||
if (!tlb_hit_page(tlb_addr2, page2)
|
||||
&& !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
|
||||
page2 & TARGET_PAGE_MASK)) {
|
||||
tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
|
||||
mmu_idx, retaddr);
|
||||
if (!tlb_hit_page(tlb_addr2, page2)) {
|
||||
if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
|
||||
tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
|
||||
mmu_idx, retaddr);
|
||||
index2 = tlb_index(env, mmu_idx, page2);
|
||||
entry2 = tlb_entry(env, mmu_idx, page2);
|
||||
}
|
||||
tlb_addr2 = tlb_addr_write(entry2);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle watchpoints. Since this may trap, all checks
|
||||
* must happen before any store.
|
||||
*/
|
||||
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
|
||||
cpu_check_watchpoint(env_cpu(env), addr, size - size2,
|
||||
env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
|
||||
BP_MEM_WRITE, retaddr);
|
||||
}
|
||||
if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
|
||||
cpu_check_watchpoint(env_cpu(env), page2, size2,
|
||||
env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
|
||||
BP_MEM_WRITE, retaddr);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1591,7 +1643,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
|||
*/
|
||||
for (i = 0; i < size; ++i) {
|
||||
uint8_t val8;
|
||||
if (big_endian) {
|
||||
if (memop_big_endian(op)) {
|
||||
/* Big-endian extract. */
|
||||
val8 = val >> (((size - 1) * 8) - (i * 8));
|
||||
} else {
|
||||
|
@ -1605,30 +1657,27 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
|||
|
||||
do_aligned_access:
|
||||
haddr = (void *)((uintptr_t)addr + entry->addend);
|
||||
switch (size) {
|
||||
case 1:
|
||||
switch (op) {
|
||||
case MO_UB:
|
||||
stb_p(haddr, val);
|
||||
break;
|
||||
case 2:
|
||||
if (big_endian) {
|
||||
stw_be_p(haddr, val);
|
||||
} else {
|
||||
stw_le_p(haddr, val);
|
||||
}
|
||||
case MO_BEUW:
|
||||
stw_be_p(haddr, val);
|
||||
break;
|
||||
case 4:
|
||||
if (big_endian) {
|
||||
stl_be_p(haddr, val);
|
||||
} else {
|
||||
stl_le_p(haddr, val);
|
||||
}
|
||||
case MO_LEUW:
|
||||
stw_le_p(haddr, val);
|
||||
break;
|
||||
case 8:
|
||||
if (big_endian) {
|
||||
stq_be_p(haddr, val);
|
||||
} else {
|
||||
stq_le_p(haddr, val);
|
||||
}
|
||||
case MO_BEUL:
|
||||
stl_be_p(haddr, val);
|
||||
break;
|
||||
case MO_LEUL:
|
||||
stl_le_p(haddr, val);
|
||||
break;
|
||||
case MO_BEQ:
|
||||
stq_be_p(haddr, val);
|
||||
break;
|
||||
case MO_LEQ:
|
||||
stq_le_p(haddr, val);
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
|
@ -1639,43 +1688,43 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
|||
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
store_helper(env, addr, val, oi, retaddr, 1, false);
|
||||
store_helper(env, addr, val, oi, retaddr, MO_UB);
|
||||
}
|
||||
|
||||
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
store_helper(env, addr, val, oi, retaddr, 2, false);
|
||||
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
|
||||
}
|
||||
|
||||
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
store_helper(env, addr, val, oi, retaddr, 2, true);
|
||||
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
|
||||
}
|
||||
|
||||
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
store_helper(env, addr, val, oi, retaddr, 4, false);
|
||||
store_helper(env, addr, val, oi, retaddr, MO_LEUL);
|
||||
}
|
||||
|
||||
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
store_helper(env, addr, val, oi, retaddr, 4, true);
|
||||
store_helper(env, addr, val, oi, retaddr, MO_BEUL);
|
||||
}
|
||||
|
||||
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
store_helper(env, addr, val, oi, retaddr, 8, false);
|
||||
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
|
||||
}
|
||||
|
||||
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
store_helper(env, addr, val, oi, retaddr, 8, true);
|
||||
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
|
||||
}
|
||||
|
||||
/* First set of helpers allows passing in of OI and RETADDR. This makes
|
||||
|
@ -1740,8 +1789,7 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
|
|||
static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 1, false, true,
|
||||
full_ldub_cmmu);
|
||||
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
|
||||
}
|
||||
|
||||
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
|
||||
|
@ -1753,7 +1801,7 @@ uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
|
|||
static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 2, false, true,
|
||||
return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
|
||||
full_le_lduw_cmmu);
|
||||
}
|
||||
|
||||
|
@ -1766,7 +1814,7 @@ uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
|
|||
static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 2, true, true,
|
||||
return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
|
||||
full_be_lduw_cmmu);
|
||||
}
|
||||
|
||||
|
@ -1779,7 +1827,7 @@ uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
|
|||
static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 4, false, true,
|
||||
return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
|
||||
full_le_ldul_cmmu);
|
||||
}
|
||||
|
||||
|
@ -1792,7 +1840,7 @@ uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
|
|||
static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 4, true, true,
|
||||
return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
|
||||
full_be_ldul_cmmu);
|
||||
}
|
||||
|
||||
|
@ -1805,13 +1853,13 @@ uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
|
|||
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 8, false, true,
|
||||
return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
|
||||
helper_le_ldq_cmmu);
|
||||
}
|
||||
|
||||
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
|
||||
TCGMemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return load_helper(env, addr, oi, retaddr, 8, true, true,
|
||||
return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
|
||||
helper_be_ldq_cmmu);
|
||||
}
|
||||
|
|
|
@@ -188,6 +188,38 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
    g_assert_not_reached();
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    switch (access_type) {
    case MMU_DATA_STORE:
        flags = PAGE_WRITE;
        break;
    case MMU_DATA_LOAD:
        flags = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        flags = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (!guest_addr_valid(addr) || page_check_range(addr, size, flags) < 0) {
        CPUState *cpu = env_cpu(env);
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tlb_fill(cpu, addr, size, access_type, MMU_USER_IDX, false,
                     retaddr);
        g_assert_not_reached();
    }

    return size ? g2h(addr) : NULL;
}

|
||||
#if defined(__i386__)
|
||||
|
||||
#if defined(__NetBSD__)
|
||||
|
|