accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
This structure will shortly contain more than just data for accessing MMIO. Rename the 'addr' member to 'xlat_section' to more clearly indicate its purpose.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 8810ee2ac0
commit 25d3ec5831
5 changed files with 73 additions and 71 deletions
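For orientation before the diff, here is a minimal sketch of what the rename amounts to at the type level. This is not the definition from the QEMU tree (that hunk is not part of the excerpt shown below); hwaddr and MemTxAttrs are reduced to stand-in types so the snippet is self-contained, and only the names mirror the real change.

#include <stdint.h>

typedef uint64_t hwaddr;                                 /* stand-in for QEMU's hwaddr */
typedef struct { unsigned unspecified : 1; } MemTxAttrs; /* stand-in for QEMU's MemTxAttrs */

/* Before: the per-entry side data was named after its MMIO role. */
typedef struct CPUIOTLBEntry {
    hwaddr addr;          /* encodes the section index and a page-aligned
                             translation offset; see iotlb_to_section() below */
    MemTxAttrs attrs;     /* memory transaction attributes */
} CPUIOTLBEntry;

/* After: the same two fields, renamed so the structure can grow to hold
 * more than MMIO data, per the commit message. */
typedef struct CPUTLBEntryFull {
    hwaddr xlat_section;  /* was 'addr'; the new name states its purpose */
    MemTxAttrs attrs;
} CPUTLBEntryFull;

Throughout the hunks that follow, iotlbentry->addr becomes full->xlat_section, the per-MMU-index arrays iotlb/viotlb become fulltlb/vfulltlb, and attrs is carried over unchanged.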
@@ -200,13 +200,13 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
     }

     g_free(fast->table);
-    g_free(desc->iotlb);
+    g_free(desc->fulltlb);

     tlb_window_reset(desc, now, 0);
     /* desc->n_used_entries is cleared by the caller */
     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_try_new(CPUTLBEntry, new_size);
-    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

     /*
      * If the allocations fail, try smaller sizes. We just freed some
@@ -215,7 +215,7 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
      * allocations to fail though, so we progressively reduce the allocation
      * size, aborting if we cannot even allocate the smallest TLB we support.
      */
-    while (fast->table == NULL || desc->iotlb == NULL) {
+    while (fast->table == NULL || desc->fulltlb == NULL) {
         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
             error_report("%s: %s", __func__, strerror(errno));
             abort();
@@ -224,9 +224,9 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

         g_free(fast->table);
-        g_free(desc->iotlb);
+        g_free(desc->fulltlb);
         fast->table = g_try_new(CPUTLBEntry, new_size);
-        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
     }
 }

@@ -258,7 +258,7 @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
     desc->n_used_entries = 0;
     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_new(CPUTLBEntry, n_entries);
-    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
     tlb_mmu_flush_locked(desc, fast);
 }

@@ -299,7 +299,7 @@ void tlb_destroy(CPUState *cpu)
         CPUTLBDescFast *fast = &env_tlb(env)->f[i];

         g_free(fast->table);
-        g_free(desc->iotlb);
+        g_free(desc->fulltlb);
     }
 }

@@ -1219,7 +1219,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,

         /* Evict the old entry into the victim tlb.  */
         copy_tlb_helper_locked(tv, te);
-        desc->viotlb[vidx] = desc->iotlb[index];
+        desc->vfulltlb[vidx] = desc->fulltlb[index];
         tlb_n_used_entries_dec(env, mmu_idx);
     }

@@ -1236,8 +1236,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      * subtract here is that of the page base, and not the same as the
      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
      */
-    desc->iotlb[index].addr = iotlb - vaddr_page;
-    desc->iotlb[index].attrs = attrs;
+    desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
+    desc->fulltlb[index].attrs = attrs;

     /* Now calculate the new entry */
     tn.addend = addend - vaddr_page;
@@ -1327,7 +1327,7 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
     }
 }

-static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
                          MMUAccessType access_type, MemOp op)
 {
@@ -1339,9 +1339,9 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;

-    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
     mr = section->mr;
-    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
     if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
@@ -1351,14 +1351,14 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
+    r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
     if (r != MEMTX_OK) {
         hwaddr physaddr = mr_offset +
             section->offset_within_address_space -
             section->offset_within_region;

         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
-                               mmu_idx, iotlbentry->attrs, r, retaddr);
+                               mmu_idx, full->attrs, r, retaddr);
     }
     if (locked) {
         qemu_mutex_unlock_iothread();
@@ -1368,8 +1368,8 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 }

 /*
- * Save a potentially trashed IOTLB entry for later lookup by plugin.
- * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
+ * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
+ * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
  * because of the side effect of io_writex changing memory layout.
  */
 static void save_iotlb_data(CPUState *cs, hwaddr addr,
@@ -1383,7 +1383,7 @@ static void save_iotlb_data(CPUState *cs, hwaddr addr,
 #endif
 }

-static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
                       int mmu_idx, uint64_t val, target_ulong addr,
                       uintptr_t retaddr, MemOp op)
 {
@@ -1394,9 +1394,9 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;

-    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
     mr = section->mr;
-    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
     if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
@@ -1406,20 +1406,20 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
      * The memory_region_dispatch may trigger a flush/resize
      * so for plugins we save the iotlb_data just in case.
      */
-    save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
+    save_iotlb_data(cpu, full->xlat_section, section, mr_offset);

     if (!qemu_mutex_iothread_locked()) {
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
+    r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
     if (r != MEMTX_OK) {
         hwaddr physaddr = mr_offset +
             section->offset_within_address_space -
             section->offset_within_region;

         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
-                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
+                               MMU_DATA_STORE, mmu_idx, full->attrs, r,
                                retaddr);
     }
     if (locked) {
@@ -1466,9 +1466,10 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
             copy_tlb_helper_locked(vtlb, &tmptlb);
             qemu_spin_unlock(&env_tlb(env)->c.lock);

-            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
-            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
-            tmpio = *io; *io = *vio; *vio = tmpio;
+            CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+            CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
+            CPUTLBEntryFull tmpf;
+            tmpf = *f1; *f1 = *f2; *f2 = tmpf;
             return true;
         }
     }
@@ -1481,9 +1482,9 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                  (ADDR) & TARGET_PAGE_MASK)

 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
-                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
+                           CPUTLBEntryFull *full, uintptr_t retaddr)
 {
-    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
+    ram_addr_t ram_addr = mem_vaddr + full->xlat_section;

     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

@@ -1575,9 +1576,9 @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
     /* Handle clean RAM pages.  */
     if (unlikely(flags & TLB_NOTDIRTY)) {
         uintptr_t index = tlb_index(env, mmu_idx, addr);
-        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

-        notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+        notdirty_write(env_cpu(env), addr, 1, full, retaddr);
         flags &= ~TLB_NOTDIRTY;
     }

@@ -1602,19 +1603,19 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,

     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
         uintptr_t index = tlb_index(env, mmu_idx, addr);
-        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

         /* Handle watchpoints.  */
         if (flags & TLB_WATCHPOINT) {
             int wp_access = (access_type == MMU_DATA_STORE
                              ? BP_MEM_WRITE : BP_MEM_READ);
             cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, wp_access, retaddr);
+                                 full->attrs, wp_access, retaddr);
         }

         /* Handle clean RAM pages.  */
         if (flags & TLB_NOTDIRTY) {
-            notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+            notdirty_write(env_cpu(env), addr, 1, full, retaddr);
         }
     }

@@ -1671,7 +1672,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
  * should have just filled the TLB. The one corner case is io_writex
  * which can cause TLB flushes and potential resizing of the TLBs
  * losing the information we need. In those cases we need to recover
- * data from a copy of the iotlbentry. As long as this always occurs
+ * data from a copy of the CPUTLBEntryFull. As long as this always occurs
  * from the same thread (which a mem callback will be) this is safe.
  */

@@ -1686,11 +1687,12 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
     if (likely(tlb_hit(tlb_addr, addr))) {
         /* We must have an iotlb entry for MMIO */
         if (tlb_addr & TLB_MMIO) {
-            CPUIOTLBEntry *iotlbentry;
-            iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+            CPUTLBEntryFull *full;
+            full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
             data->is_io = true;
-            data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
-            data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+            data->v.io.section =
+                iotlb_to_section(cpu, full->xlat_section, full->attrs);
+            data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
         } else {
             data->is_io = false;
             data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
@@ -1798,7 +1800,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,

     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
         notdirty_write(env_cpu(env), addr, size,
-                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
+                       &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
     }

     return hostaddr;
@@ -1906,7 +1908,7 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,

     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
+        CPUTLBEntryFull *full;
         bool need_swap;

         /* For anything that is unaligned, recurse through full_load.  */
@@ -1914,20 +1916,20 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
             goto do_unaligned_access;
         }

-        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

         /* Handle watchpoints.  */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
             /* On watchpoint hit, this will longjmp out.  */
             cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
+                                 full->attrs, BP_MEM_READ, retaddr);
         }

         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

         /* Handle I/O access.  */
         if (likely(tlb_addr & TLB_MMIO)) {
-            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+            return io_readx(env, full, mmu_idx, addr, retaddr,
                             access_type, op ^ (need_swap * MO_BSWAP));
         }

@@ -2242,12 +2244,12 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
      */
     if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
         cpu_check_watchpoint(env_cpu(env), addr, size - size2,
-                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
+                             env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
                              BP_MEM_WRITE, retaddr);
     }
     if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
         cpu_check_watchpoint(env_cpu(env), page2, size2,
-                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
+                             env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
                              BP_MEM_WRITE, retaddr);
     }

@@ -2311,7 +2313,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,

     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
+        CPUTLBEntryFull *full;
         bool need_swap;

         /* For anything that is unaligned, recurse through byte stores.  */
@@ -2319,20 +2321,20 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             goto do_unaligned_access;
         }

-        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

         /* Handle watchpoints.  */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
             /* On watchpoint hit, this will longjmp out.  */
             cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
+                                 full->attrs, BP_MEM_WRITE, retaddr);
         }

         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

         /* Handle I/O access.  */
         if (tlb_addr & TLB_MMIO) {
-            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+            io_writex(env, full, mmu_idx, val, addr, retaddr,
                       op ^ (need_swap * MO_BSWAP));
             return;
         }
@@ -2344,7 +2346,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,

     /* Handle clean RAM pages.  */
     if (tlb_addr & TLB_NOTDIRTY) {
-        notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+        notdirty_write(env_cpu(env), addr, size, full, retaddr);
     }

     haddr = (void *)((uintptr_t)addr + entry->addend);