xen: mapcache: Split mapcache_grants by ro and rw
Today we don't track write-ability in the cache: if a user requests a readable mapping followed by a writeable mapping of the same page, the second lookup will incorrectly hit the readable entry. Split mapcache_grants by ro and rw access; grants now get separate ways in the cache depending on writeability.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
This commit is contained in:
parent a4b20f737c
commit 88fb705600
1 changed file with 19 additions and 7 deletions
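
Before the diff, here is a minimal standalone C sketch of the behaviour described in the commit message. It is not QEMU code: the ToyCache/toy_map names and the trivial direct-mapped lookup are invented for illustration only. It shows why a grant cache keyed only by page address lets a later writeable request hit an earlier read-only entry, and how selecting a separate cache per access type (as the patch does with mapcache_grants_ro and mapcache_grants_rw) keeps the two apart.

/*
 * Illustrative sketch only (not QEMU code). Models the failure mode the
 * commit fixes: a single grant cache keyed by page address returns whatever
 * entry was inserted first, so a read-only mapping can satisfy a later
 * writeable request. Splitting the cache by access type avoids this.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_CACHE_WAYS 16

typedef struct ToyEntry {
    uint64_t page;      /* page address used as the lookup key */
    bool     valid;
    bool     writable;  /* how the backing mapping was actually created */
} ToyEntry;

typedef struct ToyCache {
    ToyEntry ways[TOY_CACHE_WAYS];
} ToyCache;

/* One cache per access type, mirroring mapcache_grants_ro/_rw in the patch. */
static ToyCache grants_ro;
static ToyCache grants_rw;

static ToyEntry *toy_lookup(ToyCache *mc, uint64_t page, bool is_write)
{
    ToyEntry *e = &mc->ways[page % TOY_CACHE_WAYS];

    if (e->valid && e->page == page) {
        return e;               /* cache hit */
    }
    /* Miss: "map" the page with the requested access and cache the entry. */
    e->page = page;
    e->valid = true;
    e->writable = is_write;
    return e;
}

static ToyEntry *toy_map(uint64_t page, bool is_write)
{
    /* The core of the fix: pick the cache by writeability. */
    ToyCache *mc = is_write ? &grants_rw : &grants_ro;
    return toy_lookup(mc, page, is_write);
}

int main(void)
{
    /* Read-only mapping of page 42, then a writeable one of the same page. */
    ToyEntry *ro = toy_map(42, false);
    ToyEntry *rw = toy_map(42, true);

    /*
     * With a single shared cache the second call would hit the read-only
     * entry; with split caches the writeable request gets its own entry.
     */
    printf("ro entry writable: %d\n", ro->writable);  /* prints 0 */
    printf("rw entry writable: %d\n", rw->writable);  /* prints 1 */
    return 0;
}

The actual patch applies the same idea to the existing MapCache machinery: two cache instances are created at init time, the lookup in xen_map_cache() selects one by is_write, and reverse lookup and invalidation consult both.
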
@@ -75,7 +75,8 @@ typedef struct MapCache {
 } MapCache;
 
 static MapCache *mapcache;
-static MapCache *mapcache_grants;
+static MapCache *mapcache_grants_ro;
+static MapCache *mapcache_grants_rw;
 static xengnttab_handle *xen_region_gnttabdev;
 
 static inline void mapcache_lock(MapCache *mc)
@@ -176,9 +177,12 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
      * Grant mappings must use XC_PAGE_SIZE granularity since we can't
      * map anything beyond the number of pages granted to us.
      */
-    mapcache_grants = xen_map_cache_init_single(f, opaque,
-                                                XC_PAGE_SHIFT,
-                                                max_mcache_size);
+    mapcache_grants_ro = xen_map_cache_init_single(f, opaque,
+                                                   XC_PAGE_SHIFT,
+                                                   max_mcache_size);
+    mapcache_grants_rw = xen_map_cache_init_single(f, opaque,
+                                                   XC_PAGE_SHIFT,
+                                                   max_mcache_size);
 
     setrlimit(RLIMIT_AS, &rlimit_as);
 }
@@ -456,9 +460,13 @@ uint8_t *xen_map_cache(MemoryRegion *mr,
                        bool is_write)
 {
     bool grant = xen_mr_is_grants(mr);
-    MapCache *mc = grant ? mapcache_grants : mapcache;
+    MapCache *mc = mapcache;
     uint8_t *p;
 
+    if (grant) {
+        mc = is_write ? mapcache_grants_rw : mapcache_grants_ro;
+    }
+
     if (grant && !lock) {
         /*
          * Grants are only supported via address_space_map(). Anything
@@ -523,7 +531,10 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
 
     addr = xen_ram_addr_from_mapcache_single(mapcache, ptr);
    if (addr == RAM_ADDR_INVALID) {
-        addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr);
+        addr = xen_ram_addr_from_mapcache_single(mapcache_grants_ro, ptr);
+    }
+    if (addr == RAM_ADDR_INVALID) {
+        addr = xen_ram_addr_from_mapcache_single(mapcache_grants_rw, ptr);
     }
 
     return addr;
@@ -626,7 +637,8 @@ static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t *buffer)
 static void xen_invalidate_map_cache_entry_all(uint8_t *buffer)
 {
     xen_invalidate_map_cache_entry_single(mapcache, buffer);
-    xen_invalidate_map_cache_entry_single(mapcache_grants, buffer);
+    xen_invalidate_map_cache_entry_single(mapcache_grants_ro, buffer);
+    xen_invalidate_map_cache_entry_single(mapcache_grants_rw, buffer);
 }
 
 static void xen_invalidate_map_cache_entry_bh(void *opaque)