Mirror of https://github.com/Motorhead1991/qemu.git
Add CPUClass::tlb_fill.
Improve tlb_vaddr_to_host for use by ARM SVE no-fault loads.

-----BEGIN PGP SIGNATURE-----

iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAlzVx4UdHHJpY2hhcmQu
aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV+U1Af/b3cV5d5a1LWRdLgR
71JCPK/M3o43r2U9wCSikteXkmNBEdEoc5+WRk2SuZFLW/JB1DHDY7/gISPIhfoB
ZIza2TxD/QK1CQ5/mMWruKBlyygbYYZgsYaaNsMJRJgicgOSjTN0nuHMbIfv3tAN
mu+IlkD0LdhVjP0fz30Jpew3b3575RCjYxEPM6KQI3RxtQFjZ3FhqV5hKR4vtdP5
yLWJQzwAbaCB3SZUvvp7TN1ZsmeyLpc+Yz/YtRTqQedo7SNWWBKldLhqq4bZnH1I
AkzHbtWIOBrjWJ34ZMAgI5Q56Du9TBbBvCdM9azmrQjSu/2kdsPBPcUyOpnUCsCx
NyXo9g==
=x71l
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190510' into staging

Add CPUClass::tlb_fill.
Improve tlb_vaddr_to_host for use by ARM SVE no-fault loads.

# gpg: Signature made Fri 10 May 2019 19:48:37 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190510: (27 commits)
  tcg: Use tlb_fill probe from tlb_vaddr_to_host
  tcg: Remove CPUClass::handle_mmu_fault
  tcg: Use CPUClass::tlb_fill in cputlb.c
  target/xtensa: Convert to CPUClass::tlb_fill
  target/unicore32: Convert to CPUClass::tlb_fill
  target/tricore: Convert to CPUClass::tlb_fill
  target/tilegx: Convert to CPUClass::tlb_fill
  target/sparc: Convert to CPUClass::tlb_fill
  target/sh4: Convert to CPUClass::tlb_fill
  target/s390x: Convert to CPUClass::tlb_fill
  target/riscv: Convert to CPUClass::tlb_fill
  target/ppc: Convert to CPUClass::tlb_fill
  target/openrisc: Convert to CPUClass::tlb_fill
  target/nios2: Convert to CPUClass::tlb_fill
  target/moxie: Convert to CPUClass::tlb_fill
  target/mips: Convert to CPUClass::tlb_fill
  target/mips: Tidy control flow in mips_cpu_handle_mmu_fault
  target/mips: Pass a valid error to raise_mmu_exception for user-only
  target/microblaze: Convert to CPUClass::tlb_fill
  target/m68k: Convert to CPUClass::tlb_fill
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit d8276573da
83 changed files with 868 additions and 1131 deletions
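For orientation before the diff: the series replaces CPUClass::handle_mmu_fault
with a single CPUClass::tlb_fill method. Its shape can be reconstructed from the
cc->tlb_fill call sites shown below, so the parameter types here mirror the
cputlb.c callers rather than quote the header declaration:

    /* Fill the TLB for the given access, or report that it cannot be done.
     * When probe is false, failure must raise the guest exception and not
     * return; when probe is true, failure simply returns false. */
    bool (*tlb_fill)(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr);

Each target/xxx conversion in the list above amounts to implementing this
method and assigning it in the target's class_init.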
@@ -855,6 +855,25 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
     return ram_addr;
 }
 
+/*
+ * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
+ * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
+ * be discarded and looked up again (e.g. via tlb_entry()).
+ */
+static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
+                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+    bool ok;
+
+    /*
+     * This is not a probe, so only valid return is success; failure
+     * should result in exception + longjmp to the cpu loop.
+     */
+    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
+    assert(ok);
+}
+
 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
                          MMUAccessType access_type, int size)
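The comment in tlb_fill() states the contract: a non-probe call may not fail by
returning, only by raising the guest exception and longjmp-ing back to the cpu
loop. A hedged sketch of how a converted target might satisfy that contract;
the mytarget_* names and the exception constant are invented for illustration,
not taken from this series:

    /* Hypothetical target hook. mytarget_translate() stands in for the
     * target's page-table walk; on success it yields a physical address
     * and protection bits. */
    static bool mytarget_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                                      MMUAccessType access_type, int mmu_idx,
                                      bool probe, uintptr_t retaddr)
    {
        hwaddr phys;
        int prot;

        if (mytarget_translate(cs, addr, access_type, mmu_idx, &phys, &prot)) {
            tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                         prot, mmu_idx, TARGET_PAGE_SIZE);
            return true;            /* entry installed; the access retries */
        }
        if (probe) {
            return false;           /* caller handles the miss quietly */
        }
        /* Non-probe: raise the fault; cpu_loop_exit_restore() does not
         * return, it unwinds to the cpu loop. */
        cs->exception_index = MYTARGET_EXCP_MMU_FAULT;
        cpu_loop_exit_restore(cs, retaddr);
    }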
@@ -938,6 +957,16 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     }
 }
 
+static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
+{
+#if TCG_OVERSIZED_GUEST
+    return *(target_ulong *)((uintptr_t)entry + ofs);
+#else
+    /* ofs might correspond to .addr_write, so use atomic_read */
+    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
+#endif
+}
+
 /* Return true if ADDR is present in the victim tlb, and has been copied
    back to the main tlb.  */
 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
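Why the helper is shaped this way: another thread may legitimately update a
comparator field concurrently (the code's own comment singles out .addr_write,
which dirty-page tracking touches), so the read must be a single atomic load.
TCG_OVERSIZED_GUEST marks builds where target_ulong is wider than the host can
load atomically (a 64-bit guest on a 32-bit host), and there a plain load is
the only option. A standalone model of the offset-based read, assuming
GCC/Clang builtins in place of QEMU's atomic_read:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t tulong;            /* stand-in for target_ulong */

    struct entry {
        tulong addr_read, addr_write, addr_code;
    };

    /* Select a comparator field by byte offset and load it atomically,
     * as tlb_read_ofs() does with offsetof(CPUTLBEntry, ...). */
    static tulong read_ofs(struct entry *e, size_t ofs)
    {
        return __atomic_load_n((tulong *)((uintptr_t)e + ofs),
                               __ATOMIC_RELAXED);
    }

    /* usage: read_ofs(e, offsetof(struct entry, addr_write)) */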
@@ -948,14 +977,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
     assert_cpu_is_self(ENV_GET_CPU(env));
     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
-        target_ulong cmp;
-
-        /* elt_ofs might correspond to .addr_write, so use atomic_read */
-#if TCG_OVERSIZED_GUEST
-        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
-#else
-        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
-#endif
+        target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
 
         if (cmp == page) {
             /* Found entry in victim tlb, swap tlb and iotlb. */
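The victim TLB is a small buffer of recently evicted entries consulted before
paying for a full tlb_fill; with tlb_read_ofs() in place the open-coded #if
block above collapses to one line. For context, roughly how the cputlb.c fast
paths drive victim_tlb_hit (shape paraphrased from existing callers such as
the store path, not an exact quote of this diff):

    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index,
                            offsetof(CPUTLBEntry, addr_write),
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(ENV_GET_CPU(env), addr, size,
                     MMU_DATA_STORE, mmu_idx, retaddr);
            /* tlb_fill may have resized the TLB: refetch the entry. */
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry);
    }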
@@ -1039,6 +1061,56 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
     }
 }
 
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+                        MMUAccessType access_type, int mmu_idx)
+{
+    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    uintptr_t tlb_addr, page;
+    size_t elt_ofs;
+
+    switch (access_type) {
+    case MMU_DATA_LOAD:
+        elt_ofs = offsetof(CPUTLBEntry, addr_read);
+        break;
+    case MMU_DATA_STORE:
+        elt_ofs = offsetof(CPUTLBEntry, addr_write);
+        break;
+    case MMU_INST_FETCH:
+        elt_ofs = offsetof(CPUTLBEntry, addr_code);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    page = addr & TARGET_PAGE_MASK;
+    tlb_addr = tlb_read_ofs(entry, elt_ofs);
+
+    if (!tlb_hit_page(tlb_addr, page)) {
+        uintptr_t index = tlb_index(env, mmu_idx, addr);
+
+        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
+            CPUState *cs = ENV_GET_CPU(env);
+            CPUClass *cc = CPU_GET_CLASS(cs);
+
+            if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
+                /* Non-faulting page table read failed.  */
+                return NULL;
+            }
+
+            /* TLB resize via tlb_fill may have moved the entry.  */
+            entry = tlb_entry(env, mmu_idx, addr);
+        }
+        tlb_addr = tlb_read_ofs(entry, elt_ofs);
+    }
+
+    if (tlb_addr & ~TARGET_PAGE_MASK) {
+        /* IO access */
+        return NULL;
+    }
+
+    return (void *)((uintptr_t)addr + entry->addend);
+}
+
 /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
  * operations, or io operations to proceed.  Return the host address.  */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
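This hunk is what the cover text means by "improve tlb_vaddr_to_host": a NULL
return now uniformly means the access cannot be satisfied through a host
pointer, either because the page is unmapped (and the probe-mode tlb_fill
reported so without raising an exception) or because it is an I/O page. A
hedged sketch of the consumer pattern this enables for ARM SVE no-fault loads;
record_fault() and reg_off stand in for the real bookkeeping in
target/arm/sve_helper.c and are illustrative only:

    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
    if (host) {
        /* Safe to load directly from host memory: no fault possible. */
        val = ldl_le_p(host);
    } else {
        /* The access would fault or touches I/O: instead of taking an
         * exception, mark this and later elements as not loaded. */
        record_fault(env, reg_off);
        return;
    }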