accel/tcg: allow invalidating a write TLB entry immediately
Background: s390x implements Low-Address Protection (LAP). If LAP is enabled, writing to effective addresses (before any translation) 0-511 and 4096-4607 triggers a protection exception. So we have subpage protection on the first two pages of every address space (where the lowcore, the CPU's private data, resides).

By immediately invalidating the write entry but allowing the caller to continue, we force every write access to these first two pages into the slow path. We will then get a TLB fault with the specific accessed address and can evaluate whether protection applies or not.

We have to make sure to ignore the invalid bit if tlb_fill() succeeds.

Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20171016202358.3633-2-david@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
commit f52bfb1214
parent d0a5cc5bf4
3 changed files with 9 additions and 3 deletions
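For illustration, a target's page-fill code would request this behaviour by OR-ing the new flag into the protection bits it hands to tlb_set_page_with_attrs(). The sketch below is a minimal illustration, not code from this commit: lowcore_needs_lap_check() and the surrounding call shape are hypothetical; only PAGE_WRITE_INV, tlb_set_page_with_attrs() and TLB_INVALID_MASK appear in the patch.

    /* Hypothetical target fill path: install an entry whose writes must
     * always refault so protection can be re-evaluated per access. */
    int prot = PAGE_READ | PAGE_WRITE;

    if (lowcore_needs_lap_check(vaddr)) {  /* hypothetical helper */
        /* tlb_set_page_with_attrs() will set TLB_INVALID_MASK in
         * addr_write, so the fast-path compare never matches and every
         * store takes the slow path through tlb_fill(). */
        prot |= PAGE_WRITE_INV;
    }
    tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot,
                            mmu_idx, TARGET_PAGE_SIZE);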
accel/tcg/cputlb.c
@@ -694,6 +694,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         } else {
             tn.addr_write = address;
         }
+        if (prot & PAGE_WRITE_INV) {
+            tn.addr_write |= TLB_INVALID_MASK;
+        }
     }
 
     /* Pairs with flag setting in tlb_reset_dirty_range */
@@ -978,7 +981,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
-        tlb_addr = tlbe->addr_write;
+        tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
     }
 
     /* Check notdirty */
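The flagged entry forces the slow path because the fast-path comparison folds TLB_INVALID_MASK into its mask, so an addr_write carrying that bit can never match. A condensed sketch of that pattern, assuming the same compare shape as the atomic_mmu_lookup() hunk above (variable setup abridged):

    /* Because TLB_INVALID_MASK is part of the compare mask, a flagged
     * entry never matches and every store re-enters tlb_fill(). */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        /* tlb_fill() succeeded, i.e. protection does not apply to this
         * particular access: ignore the invalid bit for this one use. */
        tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
    }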