xen: mapcache: Split mapcache_grants by ro and rw

Today, we don't track writability in the cache. If a user
requests a readable mapping followed by a writable mapping
on the same page, the second lookup will incorrectly hit
the read-only entry.

Split mapcache_grants by ro and rw access. Grants will now
have separate ways in the cache depending on writability.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Edgar E. Iglesias 2025-04-25 15:16:01 +02:00
parent a4b20f737c
commit 88fb705600
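The idea behind the split can be illustrated with a small, self-contained C sketch. This is not QEMU code: ToyCache, toy_cache_lookup_or_map and toy_map_grant are made-up names, and the directly indexed array is only a stand-in for the real MapCache structure. The point it shows is that grant mappings live in one of two caches, chosen by whether the caller asked for a writable mapping, so a writable request can no longer be satisfied by a read-only entry.

/*
 * Minimal standalone sketch (not QEMU code) of the ro/rw split:
 * grant mappings are looked up in one of two caches depending on
 * whether the caller asked for a writable mapping. All names here
 * are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_CACHE_WAYS 16

typedef struct ToyCache {
    unsigned long addr[TOY_CACHE_WAYS];   /* grant page address, 0 = empty */
    void *ptr[TOY_CACHE_WAYS];            /* cached mapping */
} ToyCache;

static ToyCache grants_ro;   /* read-only grant mappings */
static ToyCache grants_rw;   /* writable grant mappings */

static void *toy_cache_lookup_or_map(ToyCache *c, unsigned long addr,
                                     bool writable)
{
    unsigned idx = (addr >> 12) % TOY_CACHE_WAYS;

    if (c->addr[idx] == addr) {
        return c->ptr[idx];               /* cache hit */
    }
    /* Stand-in for the real grant-mapping call. */
    c->addr[idx] = addr;
    c->ptr[idx] = malloc(4096);
    printf("mapped %#lx %s\n", addr, writable ? "rw" : "ro");
    return c->ptr[idx];
}

/* Selection mirrors the change in xen_map_cache(): pick the cache by is_write. */
static void *toy_map_grant(unsigned long addr, bool is_write)
{
    ToyCache *c = is_write ? &grants_rw : &grants_ro;
    return toy_cache_lookup_or_map(c, addr, is_write);
}

int main(void)
{
    /*
     * Same page, first read-only, then writable: two distinct mappings,
     * so the writable request can no longer hit the read-only entry.
     */
    void *ro = toy_map_grant(0x1000, false);
    void *rw = toy_map_grant(0x1000, true);
    printf("ro=%p rw=%p distinct=%d\n", ro, rw, ro != rw);
    return 0;
}

In the patch below, the same selection happens in xen_map_cache(): mc starts out as the regular mapcache and, for grant-backed regions, is switched to mapcache_grants_rw or mapcache_grants_ro based on is_write.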


@@ -75,7 +75,8 @@ typedef struct MapCache {
 } MapCache;
 
 static MapCache *mapcache;
-static MapCache *mapcache_grants;
+static MapCache *mapcache_grants_ro;
+static MapCache *mapcache_grants_rw;
 static xengnttab_handle *xen_region_gnttabdev;
 
 static inline void mapcache_lock(MapCache *mc)
@@ -176,9 +177,12 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
      * Grant mappings must use XC_PAGE_SIZE granularity since we can't
      * map anything beyond the number of pages granted to us.
      */
-    mapcache_grants = xen_map_cache_init_single(f, opaque,
-                                                XC_PAGE_SHIFT,
-                                                max_mcache_size);
+    mapcache_grants_ro = xen_map_cache_init_single(f, opaque,
+                                                   XC_PAGE_SHIFT,
+                                                   max_mcache_size);
+    mapcache_grants_rw = xen_map_cache_init_single(f, opaque,
+                                                   XC_PAGE_SHIFT,
+                                                   max_mcache_size);
 
     setrlimit(RLIMIT_AS, &rlimit_as);
 }
@@ -456,9 +460,13 @@ uint8_t *xen_map_cache(MemoryRegion *mr,
                        bool is_write)
 {
     bool grant = xen_mr_is_grants(mr);
-    MapCache *mc = grant ? mapcache_grants : mapcache;
+    MapCache *mc = mapcache;
     uint8_t *p;
 
+    if (grant) {
+        mc = is_write ? mapcache_grants_rw : mapcache_grants_ro;
+    }
+
     if (grant && !lock) {
         /*
          * Grants are only supported via address_space_map(). Anything
@@ -523,7 +531,10 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
     addr = xen_ram_addr_from_mapcache_single(mapcache, ptr);
     if (addr == RAM_ADDR_INVALID) {
-        addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr);
+        addr = xen_ram_addr_from_mapcache_single(mapcache_grants_ro, ptr);
+    }
+    if (addr == RAM_ADDR_INVALID) {
+        addr = xen_ram_addr_from_mapcache_single(mapcache_grants_rw, ptr);
     }
 
     return addr;
}
@@ -626,7 +637,8 @@ static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t *buffer)
 static void xen_invalidate_map_cache_entry_all(uint8_t *buffer)
 {
     xen_invalidate_map_cache_entry_single(mapcache, buffer);
-    xen_invalidate_map_cache_entry_single(mapcache_grants, buffer);
+    xen_invalidate_map_cache_entry_single(mapcache_grants_ro, buffer);
+    xen_invalidate_map_cache_entry_single(mapcache_grants_rw, buffer);
 }
 
 static void xen_invalidate_map_cache_entry_bh(void *opaque)