tcg: Create struct CPUTLB
Move all softmmu tlb data into this structure. Arrange the members
so that we are able to place mask+table together and at a smaller
absolute offset from ENV.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit a40ec84ee2 (parent 74433bf083)
12 changed files with 146 additions and 163 deletions
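For orientation, here is a sketch of the new layout, reconstructed from the
accessors used in the diff below (env_tlb(env)->c, ->d[mmu_idx], ->f[mmu_idx]).
It is illustrative only; the authoritative definitions live in
include/exec/cpu-defs.h of this tree, and the exact member order and types
there may differ. CPUTLBDesc itself predates this patch; here it absorbs the
per-mmu_idx iotlb, vtable, and viotlb arrays.

    /* Fast-path data: one pair per mmu_idx, kept adjacent so that TCG can
     * reach both mask and table with env-relative addressing. */
    typedef struct CPUTLBDescFast {
        uintptr_t mask;          /* (n_entries - 1) << CPU_TLB_ENTRY_BITS */
        CPUTLBEntry *table;      /* the dynamically sized TLB proper */
    } CPUTLBDescFast;

    /* State shared by all mmu_idx. */
    typedef struct CPUTLBCommon {
        QemuSpin lock;           /* serializes updates to any TLB data */
        uint16_t dirty;          /* bitmask of mmu_idx with stale entries */
        size_t full_flush_count;
        size_t part_flush_count;
        size_t elide_flush_count;
    } CPUTLBCommon;

    typedef struct CPUTLB {
        CPUTLBCommon c;
        CPUTLBDesc d[NB_MMU_MODES];     /* slow path: vtable, viotlb, iotlb,
                                           large-page and resize bookkeeping */
        CPUTLBDescFast f[NB_MMU_MODES]; /* fast path: mask + table */
    } CPUTLB;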
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -76,7 +76,7 @@ QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
 
 static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
 {
-    return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS);
+    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
 }
 
 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
@@ -91,14 +91,14 @@ static void tlb_dyn_init(CPUArchState *env)
     int i;
 
     for (i = 0; i < NB_MMU_MODES; i++) {
-        CPUTLBDesc *desc = &env->tlb_d[i];
+        CPUTLBDesc *desc = &env_tlb(env)->d[i];
         size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
 
         tlb_window_reset(desc, get_clock_realtime(), 0);
         desc->n_used_entries = 0;
-        env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
-        env->tlb_table[i] = g_new(CPUTLBEntry, n_entries);
-        env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries);
+        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
+        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
+        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
     }
 }
 
@@ -144,7 +144,7 @@ static void tlb_dyn_init(CPUArchState *env)
  */
 static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
 {
-    CPUTLBDesc *desc = &env->tlb_d[mmu_idx];
+    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
     size_t old_size = tlb_n_entries(env, mmu_idx);
     size_t rate;
     size_t new_size = old_size;
@@ -187,14 +187,14 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
         return;
     }
 
-    g_free(env->tlb_table[mmu_idx]);
-    g_free(env->iotlb[mmu_idx]);
+    g_free(env_tlb(env)->f[mmu_idx].table);
+    g_free(env_tlb(env)->d[mmu_idx].iotlb);
 
     tlb_window_reset(desc, now, 0);
     /* desc->n_used_entries is cleared by the caller */
-    env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
-    env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
-    env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
+    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
+    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
     /*
      * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
@@ -202,46 +202,47 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
-    while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) {
+    while (env_tlb(env)->f[mmu_idx].table == NULL ||
+           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
             error_report("%s: %s", __func__, strerror(errno));
             abort();
         }
         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
-        env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
 
-        g_free(env->tlb_table[mmu_idx]);
-        g_free(env->iotlb[mmu_idx]);
-        env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
-        env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
+        g_free(env_tlb(env)->f[mmu_idx].table);
+        g_free(env_tlb(env)->d[mmu_idx].iotlb);
+        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
+        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
     }
 }
 
 static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
 {
     tlb_mmu_resize_locked(env, mmu_idx);
-    memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx));
-    env->tlb_d[mmu_idx].n_used_entries = 0;
+    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
+    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
 }
 
 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
 {
-    env->tlb_d[mmu_idx].n_used_entries++;
+    env_tlb(env)->d[mmu_idx].n_used_entries++;
 }
 
 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
 {
-    env->tlb_d[mmu_idx].n_used_entries--;
+    env_tlb(env)->d[mmu_idx].n_used_entries--;
 }
 
 void tlb_init(CPUState *cpu)
 {
     CPUArchState *env = cpu->env_ptr;
 
-    qemu_spin_init(&env->tlb_c.lock);
+    qemu_spin_init(&env_tlb(env)->c.lock);
 
     /* Ensure that cpu_reset performs a full flush. */
-    env->tlb_c.dirty = ALL_MMUIDX_BITS;
+    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
 
     tlb_dyn_init(env);
 }
@@ -273,9 +274,9 @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
     CPU_FOREACH(cpu) {
         CPUArchState *env = cpu->env_ptr;
 
-        full += atomic_read(&env->tlb_c.full_flush_count);
-        part += atomic_read(&env->tlb_c.part_flush_count);
-        elide += atomic_read(&env->tlb_c.elide_flush_count);
+        full += atomic_read(&env_tlb(env)->c.full_flush_count);
+        part += atomic_read(&env_tlb(env)->c.part_flush_count);
+        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
     }
     *pfull = full;
     *ppart = part;
@@ -285,10 +286,11 @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
 {
     tlb_table_flush_by_mmuidx(env, mmu_idx);
-    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
-    env->tlb_d[mmu_idx].large_page_addr = -1;
-    env->tlb_d[mmu_idx].large_page_mask = -1;
-    env->tlb_d[mmu_idx].vindex = 0;
+    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
+    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
+    env_tlb(env)->d[mmu_idx].vindex = 0;
+    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
+           sizeof(env_tlb(env)->d[0].vtable));
 }
 
 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
@@ -301,31 +303,31 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 
     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
 
-    qemu_spin_lock(&env->tlb_c.lock);
+    qemu_spin_lock(&env_tlb(env)->c.lock);
 
-    all_dirty = env->tlb_c.dirty;
+    all_dirty = env_tlb(env)->c.dirty;
     to_clean = asked & all_dirty;
     all_dirty &= ~to_clean;
-    env->tlb_c.dirty = all_dirty;
+    env_tlb(env)->c.dirty = all_dirty;
 
     for (work = to_clean; work != 0; work &= work - 1) {
         int mmu_idx = ctz32(work);
         tlb_flush_one_mmuidx_locked(env, mmu_idx);
     }
 
-    qemu_spin_unlock(&env->tlb_c.lock);
+    qemu_spin_unlock(&env_tlb(env)->c.lock);
 
     cpu_tb_jmp_cache_clear(cpu);
 
     if (to_clean == ALL_MMUIDX_BITS) {
-        atomic_set(&env->tlb_c.full_flush_count,
-                   env->tlb_c.full_flush_count + 1);
+        atomic_set(&env_tlb(env)->c.full_flush_count,
+                   env_tlb(env)->c.full_flush_count + 1);
     } else {
-        atomic_set(&env->tlb_c.part_flush_count,
-                   env->tlb_c.part_flush_count + ctpop16(to_clean));
+        atomic_set(&env_tlb(env)->c.part_flush_count,
+                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
         if (to_clean != asked) {
-            atomic_set(&env->tlb_c.elide_flush_count,
-                       env->tlb_c.elide_flush_count +
+            atomic_set(&env_tlb(env)->c.elide_flush_count,
+                       env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
         }
     }
@@ -410,11 +412,12 @@ static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                               target_ulong page)
 {
+    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
     int k;
 
     assert_cpu_is_self(ENV_GET_CPU(env));
     for (k = 0; k < CPU_VTLB_SIZE; k++) {
-        if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) {
+        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
             tlb_n_used_entries_dec(env, mmu_idx);
         }
     }
@@ -423,8 +426,8 @@ static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
 static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                   target_ulong page)
 {
-    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
-    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;
+    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
+    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
 
     /* Check if we need to flush due to large pages. */
     if ((page & lp_mask) == lp_addr) {
@@ -459,13 +462,13 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
     tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
               addr, mmu_idx_bitmap);
 
-    qemu_spin_lock(&env->tlb_c.lock);
+    qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
             tlb_flush_page_locked(env, mmu_idx, addr);
         }
     }
-    qemu_spin_unlock(&env->tlb_c.lock);
+    qemu_spin_unlock(&env_tlb(env)->c.lock);
 
     tb_flush_jmp_cache(cpu, addr);
 }
@@ -609,22 +612,22 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
     int mmu_idx;
 
     env = cpu->env_ptr;
-    qemu_spin_lock(&env->tlb_c.lock);
+    qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         unsigned int i;
         unsigned int n = tlb_n_entries(env, mmu_idx);
 
         for (i = 0; i < n; i++) {
-            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
-                                         length);
+            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
+                                         start1, length);
         }
 
         for (i = 0; i < CPU_VTLB_SIZE; i++) {
-            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
-                                         length);
+            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
+                                         start1, length);
         }
     }
-    qemu_spin_unlock(&env->tlb_c.lock);
+    qemu_spin_unlock(&env_tlb(env)->c.lock);
 }
 
 /* Called with tlb_c.lock held */
@@ -646,7 +649,7 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
     assert_cpu_is_self(cpu);
 
     vaddr &= TARGET_PAGE_MASK;
-    qemu_spin_lock(&env->tlb_c.lock);
+    qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
     }
@@ -654,10 +657,10 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         int k;
         for (k = 0; k < CPU_VTLB_SIZE; k++) {
-            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
+            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
         }
     }
-    qemu_spin_unlock(&env->tlb_c.lock);
+    qemu_spin_unlock(&env_tlb(env)->c.lock);
 }
 
 /* Our TLB does not support large pages, so remember the area covered by
@@ -665,7 +668,7 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
 static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                                target_ulong vaddr, target_ulong size)
 {
-    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
+    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
     target_ulong lp_mask = ~(size - 1);
 
     if (lp_addr == (target_ulong)-1) {
@@ -675,13 +678,13 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
         /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
-        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
+        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
         while (((lp_addr ^ vaddr) & lp_mask) != 0) {
             lp_mask <<= 1;
         }
     }
-    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
-    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
+    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
+    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
 }
 
 /* Add a new TLB entry. At most one entry for a given virtual address
@@ -696,6 +699,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                              int mmu_idx, target_ulong size)
 {
     CPUArchState *env = cpu->env_ptr;
+    CPUTLB *tlb = env_tlb(env);
+    CPUTLBDesc *desc = &tlb->d[mmu_idx];
     MemoryRegionSection *section;
     unsigned int index;
     target_ulong address;
@@ -757,10 +762,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
-    qemu_spin_lock(&env->tlb_c.lock);
+    qemu_spin_lock(&tlb->c.lock);
 
     /* Note that the tlb is no longer clean. */
-    env->tlb_c.dirty |= 1 << mmu_idx;
+    tlb->c.dirty |= 1 << mmu_idx;
 
     /* Make sure there's no cached translation for the new page. */
     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
@@ -770,12 +775,12 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
-        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
-        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
+        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
+        CPUTLBEntry *tv = &desc->vtable[vidx];
 
         /* Evict the old entry into the victim tlb. */
         copy_tlb_helper_locked(tv, te);
-        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
+        desc->viotlb[vidx] = desc->iotlb[index];
         tlb_n_used_entries_dec(env, mmu_idx);
     }
 
@@ -792,8 +797,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
-    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
-    env->iotlb[mmu_idx][index].attrs = attrs;
+    desc->iotlb[index].addr = iotlb - vaddr_page;
+    desc->iotlb[index].attrs = attrs;
 
     /* Now calculate the new entry */
     tn.addend = addend - vaddr_page;
@@ -829,7 +834,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
 
     copy_tlb_helper_locked(te, &tn);
     tlb_n_used_entries_inc(env, mmu_idx);
-    qemu_spin_unlock(&env->tlb_c.lock);
+    qemu_spin_unlock(&tlb->c.lock);
 }
 
 /* Add a new TLB entry, but without specifying the memory
@@ -976,21 +981,28 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
 
     assert_cpu_is_self(ENV_GET_CPU(env));
     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
-        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
-        target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
+        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
+        target_ulong cmp;
+
+        /* elt_ofs might correspond to .addr_write, so use atomic_read */
+#if TCG_OVERSIZED_GUEST
+        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
+#else
+        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
+#endif
 
         if (cmp == page) {
             /* Found entry in victim tlb, swap tlb and iotlb. */
-            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
+            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
 
-            qemu_spin_lock(&env->tlb_c.lock);
+            qemu_spin_lock(&env_tlb(env)->c.lock);
             copy_tlb_helper_locked(&tmptlb, tlb);
             copy_tlb_helper_locked(tlb, vtlb);
             copy_tlb_helper_locked(vtlb, &tmptlb);
-            qemu_spin_unlock(&env->tlb_c.lock);
+            qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
-            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
+            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
+            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
             tmpio = *io; *io = *vio; *vio = tmpio;
             return true;
         }
@@ -1293,8 +1305,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             }
         }
 
-        res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
-                       retaddr, access_type, size);
+        res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
+                       mmu_idx, addr, retaddr, access_type, size);
         return handle_bswap(res, size, big_endian);
     }
 
@@ -1541,7 +1553,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             }
         }
 
-        io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
+        io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
                  handle_bswap(val, size, big_endian),
                  addr, retaddr, size);
        return;
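To see why the commit message insists on placing mask and table together at a
small offset from ENV, consider how a TLB lookup consumes them. The helpers
below are a minimal sketch, not part of this patch (the _sketch names are
mine): with both fields adjacent inside one small struct, the generated fast
path can reach them through a single base register and small immediate
offsets from env.

    static inline uintptr_t tlb_index_sketch(CPUArchState *env,
                                             uintptr_t mmu_idx,
                                             target_ulong addr)
    {
        /* f[mmu_idx].mask holds (n_entries - 1) << CPU_TLB_ENTRY_BITS, so a
         * single load both sizes the table and scales the index. */
        uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;

        return (addr >> TARGET_PAGE_BITS) & size_mask;
    }

    static inline CPUTLBEntry *tlb_entry_sketch(CPUArchState *env,
                                                uintptr_t mmu_idx,
                                                target_ulong addr)
    {
        /* table sits next to mask, so the same addressing reaches both. */
        return &env_tlb(env)->f[mmu_idx].table[tlb_index_sketch(env, mmu_idx, addr)];
    }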