accel/tcg: Add probe_access_flags
This new interface will allow targets to probe for a page and then
handle watchpoints themselves. This will be most useful for vector
predicated memory operations, where one page lookup can be used for
many operations, and one test can avoid many watchpoint checks.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200508154359.7494-6-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent 7a1bfee682
commit 069cfe77d6
4 changed files with 158 additions and 97 deletions
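Before the cputlb.c hunks below, a minimal usage sketch of the interface this commit adds. The sketch is not part of the commit: the helper name example_vector_load, the 32-bit element width, and the fallback to cpu_ldl_data_ra() are illustrative assumptions; only probe_access_flags(), the TLB_* bits, ldl_p() and cpu_ldl_data_ra() are existing QEMU names.

/*
 * Illustrative sketch only, not part of this commit.  A target's vector
 * load helper might probe once per page and then use the host pointer
 * for every element on that page.
 */
static void example_vector_load(CPUArchState *env, target_ulong addr,
                                uint32_t *dest, int nelem,
                                int mmu_idx, uintptr_t retaddr)
{
    void *host;
    int flags;

    /* Assumes the whole access stays within one guest page. */
    flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
                               false, &host, retaddr);

    if (flags == 0) {
        /* Plain dirty RAM, no watchpoints: read via the host pointer. */
        for (int i = 0; i < nelem; i++) {
            dest[i] = ldl_p((char *)host + i * 4);
        }
    } else {
        /* MMIO, watchpoints, etc.: per-element slow path does the checks. */
        for (int i = 0; i < nelem; i++) {
            dest[i] = cpu_ldl_data_ra(env, addr + i * 4, retaddr);
        }
    }
}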
accel/tcg/cputlb.c
@@ -1231,86 +1231,16 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
     }
 }
 
-/*
- * Probe for whether the specified guest access is permitted. If it is not
- * permitted then an exception will be taken in the same way as if this
- * were a real access (and we will not return).
- * If the size is 0 or the page requires I/O access, returns NULL; otherwise,
- * returns the address of the host page similar to tlb_vaddr_to_host().
- */
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
-                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static int probe_access_internal(CPUArchState *env, target_ulong addr,
+                                 int fault_size, MMUAccessType access_type,
+                                 int mmu_idx, bool nonfault,
+                                 void **phost, uintptr_t retaddr)
 {
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr;
-    size_t elt_ofs;
-    int wp_access;
-
-    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
-
-    switch (access_type) {
-    case MMU_DATA_LOAD:
-        elt_ofs = offsetof(CPUTLBEntry, addr_read);
-        wp_access = BP_MEM_READ;
-        break;
-    case MMU_DATA_STORE:
-        elt_ofs = offsetof(CPUTLBEntry, addr_write);
-        wp_access = BP_MEM_WRITE;
-        break;
-    case MMU_INST_FETCH:
-        elt_ofs = offsetof(CPUTLBEntry, addr_code);
-        wp_access = BP_MEM_READ;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-    tlb_addr = tlb_read_ofs(entry, elt_ofs);
-
-    if (unlikely(!tlb_hit(tlb_addr, addr))) {
-        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs,
-                            addr & TARGET_PAGE_MASK)) {
-            tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr);
-            /* TLB resize via tlb_fill may have moved the entry. */
-            index = tlb_index(env, mmu_idx, addr);
-            entry = tlb_entry(env, mmu_idx, addr);
-        }
-        tlb_addr = tlb_read_ofs(entry, elt_ofs);
-    }
-
-    if (!size) {
-        return NULL;
-    }
-
-    if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
-
-        /* Reject I/O access, or other required slow-path. */
-        if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
-            return NULL;
-        }
-
-        /* Handle watchpoints. */
-        if (tlb_addr & TLB_WATCHPOINT) {
-            cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, wp_access, retaddr);
-        }
-
-        /* Handle clean RAM pages. */
-        if (tlb_addr & TLB_NOTDIRTY) {
-            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
-        }
-    }
-
-    return (void *)((uintptr_t)addr + entry->addend);
-}
-
-void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
-                        MMUAccessType access_type, int mmu_idx)
-{
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr, page;
+    target_ulong tlb_addr, page_addr;
     size_t elt_ofs;
+    int flags;
 
     switch (access_type) {
     case MMU_DATA_LOAD:
@@ -1325,20 +1255,19 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
     default:
         g_assert_not_reached();
     }
-
-    page = addr & TARGET_PAGE_MASK;
     tlb_addr = tlb_read_ofs(entry, elt_ofs);
 
-    if (!tlb_hit_page(tlb_addr, page)) {
-        uintptr_t index = tlb_index(env, mmu_idx, addr);
-
-        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
+    page_addr = addr & TARGET_PAGE_MASK;
+    if (!tlb_hit_page(tlb_addr, page_addr)) {
+        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
             CPUState *cs = env_cpu(env);
             CPUClass *cc = CPU_GET_CLASS(cs);
 
-            if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
+            if (!cc->tlb_fill(cs, addr, fault_size, access_type,
+                              mmu_idx, nonfault, retaddr)) {
                 /* Non-faulting page table read failed. */
-                return NULL;
+                *phost = NULL;
+                return TLB_INVALID_MASK;
             }
 
             /* TLB resize via tlb_fill may have moved the entry. */
@@ -1346,15 +1275,89 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
         }
         tlb_addr = tlb_read_ofs(entry, elt_ofs);
     }
+    flags = tlb_addr & TLB_FLAGS_MASK;
 
-    if (tlb_addr & ~TARGET_PAGE_MASK) {
-        /* IO access */
+    /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
+    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
+        *phost = NULL;
+        return TLB_MMIO;
+    }
+
+    /* Everything else is RAM. */
+    *phost = (void *)((uintptr_t)addr + entry->addend);
+    return flags;
+}
+
+int probe_access_flags(CPUArchState *env, target_ulong addr,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool nonfault, void **phost, uintptr_t retaddr)
+{
+    int flags;
+
+    flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
+                                  nonfault, phost, retaddr);
+
+    /* Handle clean RAM pages. */
+    if (unlikely(flags & TLB_NOTDIRTY)) {
+        uintptr_t index = tlb_index(env, mmu_idx, addr);
+        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+
+        notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+        flags &= ~TLB_NOTDIRTY;
+    }
+
+    return flags;
+}
+
+void *probe_access(CPUArchState *env, target_ulong addr, int size,
+                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+{
+    void *host;
+    int flags;
+
+    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
+
+    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
+                                  false, &host, retaddr);
+
+    /* Per the interface, size == 0 merely faults the access. */
+    if (size == 0) {
         return NULL;
     }
 
-    return (void *)((uintptr_t)addr + entry->addend);
+    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
+        uintptr_t index = tlb_index(env, mmu_idx, addr);
+        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+
+        /* Handle watchpoints. */
+        if (flags & TLB_WATCHPOINT) {
+            int wp_access = (access_type == MMU_DATA_STORE
+                             ? BP_MEM_WRITE : BP_MEM_READ);
+            cpu_check_watchpoint(env_cpu(env), addr, size,
+                                 iotlbentry->attrs, wp_access, retaddr);
+        }
+
+        /* Handle clean RAM pages. */
+        if (flags & TLB_NOTDIRTY) {
+            notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+        }
+    }
+
+    return host;
+}
+
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+                        MMUAccessType access_type, int mmu_idx)
+{
+    void *host;
+    int flags;
+
+    flags = probe_access_internal(env, addr, 0, access_type,
+                                  mmu_idx, true, &host, 0);
+
+    /* No combination of flags are expected by the caller. */
+    return flags ? NULL : host;
+}
 
 #ifdef CONFIG_PLUGIN
 /*
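A second hedged sketch of the other point in the commit message, that one test can avoid many watchpoint checks: with nonfault=true the probe reports a failed page-table walk through TLB_INVALID_MASK instead of raising an exception, and a single cpu_check_watchpoint() call can cover the whole vector access. Every name apart from probe_access_flags(), cpu_check_watchpoint(), cpu_stl_data_ra() and the TLB_/BP_/MEMTXATTRS constants is an assumption for illustration, not code from this series.

/*
 * Illustrative sketch only, not part of this commit.  A non-faulting
 * predicated vector store: probe once, test the watchpoint bit once for
 * the whole access, then store the elements.
 */
static bool example_vector_store(CPUArchState *env, target_ulong addr,
                                 const uint32_t *src, int nelem,
                                 int mmu_idx, uintptr_t retaddr)
{
    void *host;
    int flags;

    flags = probe_access_flags(env, addr, MMU_DATA_STORE, mmu_idx,
                               true, &host, retaddr);
    if (flags & TLB_INVALID_MASK) {
        /* Non-faulting probe failed; let the caller suppress the access. */
        return false;
    }
    if (flags & TLB_WATCHPOINT) {
        /*
         * One watchpoint check for the whole vector rather than one per
         * element.  Real code would pass the page's MemTxAttrs here
         * instead of MEMTXATTRS_UNSPECIFIED.
         */
        cpu_check_watchpoint(env_cpu(env), addr, nelem * 4,
                             MEMTXATTRS_UNSPECIFIED, BP_MEM_WRITE, retaddr);
    }
    for (int i = 0; i < nelem; i++) {
        cpu_stl_data_ra(env, addr + i * 4, src[i], retaddr);
    }
    return true;
}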