cputlb: Move NOTDIRTY handling from I/O path to TLB path

Pages that we want to track for NOTDIRTY are RAM.  We do not
really need to go through the I/O path to handle them.

Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson, 2019-09-18 09:15:44 -07:00
commit 08565552f7, parent 7b0d792ce1
4 changed files with 23 additions and 71 deletions
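The exec.c hunks below only delete code; the replacement lives in accel/tcg/cputlb.c, where the store slow path now handles clean RAM pages itself whenever a TLB entry carries TLB_NOTDIRTY. The helper added there looks roughly like this (a sketch reconstructed from the shape of the series — treat the exact signature, the CPUIOTLBEntry plumbing, and the tb_invalidate_phys_page_fast arguments as approximate):

/* cputlb.c (sketch): called from the store slow path when the TLB
 * entry for this page has TLB_NOTDIRTY set.  */
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

    /* If this page holds translated code, invalidate those TBs first.  */
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }

    /* Set the VGA and migration dirty bits in one step.  */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* Once the page is fully dirty, clear TLB_NOTDIRTY so later
     * stores to it take the fast path again.  */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(cpu, mem_vaddr);
    }
}

Because the write itself is then performed as a normal RAM store, the fake io_mem_notdirty MemoryRegion, its PHYS_SECTION_NOTDIRTY section, and the notdirty_mem_ops dispatch machinery all become dead code, which is what the diff removes.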

exec.c (50 deletions)

@@ -88,7 +88,6 @@ static MemoryRegion *system_io;
 AddressSpace address_space_io;
 AddressSpace address_space_memory;
 
-MemoryRegion io_mem_notdirty;
 static MemoryRegion io_mem_unassigned;
 
 #endif
@@ -191,7 +190,6 @@ typedef struct subpage_t {
 } subpage_t;
 
 #define PHYS_SECTION_UNASSIGNED 0
-#define PHYS_SECTION_NOTDIRTY 1
 
 static void io_mem_init(void);
 static void memory_map_init(void);
@@ -1472,9 +1470,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
     if (memory_region_is_ram(section->mr)) {
         /* Normal RAM.  */
         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
-        if (!section->readonly) {
-            iotlb |= PHYS_SECTION_NOTDIRTY;
-        }
     } else {
         AddressSpaceDispatch *d;
 
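Since memory_region_section_get_iotlb no longer folds PHYS_SECTION_NOTDIRTY into the returned address, the clean-page test moves to TLB fill. Schematically, the cputlb.c side of the change does something like the following inside tlb_set_page_with_attrs (again a sketch, not the verbatim hunk; variable names are approximate):

    /* tlb_set_page_with_attrs() (sketch): for RAM, tag writable pages
     * that are still clean so that stores trap to the slow path.  */
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if ((prot & PAGE_WRITE)
            && !section->readonly
            && cpu_physical_memory_is_clean(iotlb)) {
            address |= TLB_NOTDIRTY;
        }
    }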
@@ -2783,42 +2778,6 @@ void memory_notdirty_write_complete(NotDirtyInfo *ndi)
     }
 }
 
-/* Called within RCU critical section.  */
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
-                               uint64_t val, unsigned size)
-{
-    NotDirtyInfo ndi;
-
-    memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
-                                  ram_addr, size);
-
-    stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
-    memory_notdirty_write_complete(&ndi);
-}
-
-static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
-                                 unsigned size, bool is_write,
-                                 MemTxAttrs attrs)
-{
-    return is_write;
-}
-
-static const MemoryRegionOps notdirty_mem_ops = {
-    .write = notdirty_mem_write,
-    .valid.accepts = notdirty_mem_accepts,
-    .endianness = DEVICE_NATIVE_ENDIAN,
-    .valid = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-    .impl = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-};
-
 /* Generate a debug exception if a watchpoint has been hit.  */
 void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                           MemTxAttrs attrs, int flags, uintptr_t ra)
@@ -3014,13 +2973,6 @@ static void io_mem_init(void)
 {
     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                           NULL, UINT64_MAX);
-
-    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
-     * which can be called without the iothread mutex.
-     */
-    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
-                          NULL, UINT64_MAX);
-    memory_region_clear_global_locking(&io_mem_notdirty);
 }
 
 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
@@ -3030,8 +2982,6 @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
     n = dummy_section(&d->map, fv, &io_mem_unassigned);
     assert(n == PHYS_SECTION_UNASSIGNED);
-    n = dummy_section(&d->map, fv, &io_mem_notdirty);
-    assert(n == PHYS_SECTION_NOTDIRTY);
 
     d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };