accel/tcg: Remove TARGET_PAGE_DATA_SIZE
This macro is used by only one target, and even then under unusual
conditions -- AArch64 with mmap's PROT_MTE flag. Since the page size
for aarch64-linux-user is variable, the per-page data size is variable
as well. Since page_reset_target_data, reached via target_munmap, does
not have ready access to CPUState, simply pass in the size from the
first allocation and remember it.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
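The pattern the message describes is worth spelling out. Below is a minimal
standalone sketch of it (mine, not part of the patch; all names are
illustrative): the allocation path latches the per-page size on first use,
and the reset path, which has no way to recompute it, consults the latch and
does nothing if the feature was never used.

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

static size_t latched_size;    /* 0 until the first allocation */
static void *page_data;        /* stand-in for the per-page data store */

/* Allocation path: @size must be the same on every call. */
void *page_data_get(size_t size)
{
    if (latched_size != size) {
        assert(latched_size == 0);   /* the first caller sets it, once */
        latched_size = size;
    }
    if (!page_data) {
        page_data = calloc(1, size);
    }
    return page_data;
}

/* Reset path: no size argument needed; the latch remembers it. */
void page_data_reset(void)
{
    if (latched_size == 0) {
        return;                      /* feature never used, nothing to clear */
    }
    memset(page_data, 0, latched_size);
}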
commit 964080d356
parent 2c0b261fcd

4 changed files with 23 additions and 19 deletions
@@ -870,7 +870,6 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
     return addr;
 }
 
-#ifdef TARGET_PAGE_DATA_SIZE
 /*
  * Allocate chunks of target data together. For the only current user,
  * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
@@ -886,10 +885,16 @@ typedef struct TargetPageDataNode {
 } TargetPageDataNode;
 
 static IntervalTreeRoot targetdata_root;
+static size_t target_page_data_size;
 
 void page_reset_target_data(vaddr start, vaddr last)
 {
     IntervalTreeNode *n, *next;
+    size_t size = target_page_data_size;
+
+    if (likely(size == 0)) {
+        return;
+    }
 
     assert_memory_lock();
 
@@ -920,17 +925,22 @@ void page_reset_target_data(vaddr start, vaddr last)
         n_last = MIN(last, n->last);
         p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
 
-        memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
-               p_len * TARGET_PAGE_DATA_SIZE);
+        memset(t->data + p_ofs * size, 0, p_len * size);
     }
 }
 
-void *page_get_target_data(vaddr address)
+void *page_get_target_data(vaddr address, size_t size)
 {
     IntervalTreeNode *n;
     TargetPageDataNode *t;
     vaddr page, region, p_ofs;
 
+    /* Remember the size from the first call, and it should be constant. */
+    if (unlikely(target_page_data_size != size)) {
+        assert(target_page_data_size == 0);
+        target_page_data_size = size;
+    }
+
     page = address & TARGET_PAGE_MASK;
     region = address & TBD_MASK;
 
@@ -945,8 +955,7 @@ void *page_get_target_data(vaddr address)
     mmap_lock();
     n = interval_tree_iter_first(&targetdata_root, page, page);
     if (!n) {
-        t = g_malloc0(sizeof(TargetPageDataNode)
-                      + TPD_PAGES * TARGET_PAGE_DATA_SIZE);
+        t = g_malloc0(sizeof(TargetPageDataNode) + TPD_PAGES * size);
         n = &t->itree;
         n->start = region;
         n->last = region | ~TBD_MASK;
@@ -957,11 +966,8 @@ void *page_get_target_data(vaddr address)
 
     t = container_of(n, TargetPageDataNode, itree);
     p_ofs = (page - region) >> TARGET_PAGE_BITS;
-    return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
+    return t->data + p_ofs * size;
 }
-#else
-void page_reset_target_data(vaddr start, vaddr last) { }
-#endif /* TARGET_PAGE_DATA_SIZE */
 
 /* The system-mode versions of these helpers are in cputlb.c. */
 
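A concrete reading of the "40/128" in the comment above (my arithmetic, not
from the patch): with 4 KiB guest pages, MTE stores one 4-bit tag per 16-byte
granule, so a page needs 4096 / 16 / 2 = 128 bytes of out-of-band data,
against roughly 40 bytes of interval-tree bookkeeping per node. Strictly that
ratio is about 31%, which the comment rounds up; batching TPD_PAGES pages per
node is what shrinks it.

/* Checking the figures quoted above; the 4 KiB page size is an assumption. */
#include <stdio.h>

int main(void)
{
    int page_bytes = 4096;
    int granule = 16;                          /* 1 << LOG2_TAG_GRANULE */
    int data_bytes = page_bytes / granule / 2; /* two 4-bit tags per byte */
    double overhead = 40.0 / data_bytes;       /* ~40-byte node per page */
    printf("%d data bytes/page, %.1f%% overhead\n",
           data_bytes, overhead * 100.0);      /* 128 bytes, 31.2% */
    return 0;
}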
@@ -73,18 +73,20 @@ bool page_check_range_empty(vaddr start, vaddr last);
 vaddr page_find_range_empty(vaddr min, vaddr max, vaddr len, vaddr align);
 
 /**
- * page_get_target_data(address)
+ * page_get_target_data
  * @address: guest virtual address
+ * @size: per-page size
  *
- * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
+ * Return @size bytes of out-of-band data to associate
  * with the guest page at @address, allocating it if necessary. The
  * caller should already have verified that the address is valid.
+ * The value of @size must be the same for every call.
  *
  * The memory will be freed when the guest page is deallocated,
  * e.g. with the munmap system call.
 */
 __attribute__((returns_nonnull))
-void *page_get_target_data(vaddr address);
+void *page_get_target_data(vaddr address, size_t size);
 
 typedef int (*walk_memory_regions_fn)(void *, vaddr, vaddr, int);
 int walk_memory_regions(void *, walk_memory_regions_fn);
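A hedged usage sketch of the declaration above; the wrapper name and the
fixed 128-byte size are illustrative assumptions, and the surrounding header
(which supplies vaddr and the prototype) is presumed included.

#include <stdint.h>

#define MY_PAGE_DATA_SIZE 128   /* assumed fixed per-target size */

uint8_t *my_page_tags(vaddr address)
{
    /* Pass the identical size on every call, per the contract above. */
    return page_get_target_data(address, MY_PAGE_DATA_SIZE);
}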
@@ -3213,8 +3213,4 @@ extern const uint64_t pred_esz_masks[5];
 #define LOG2_TAG_GRANULE 4
 #define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
 
-#ifdef CONFIG_USER_ONLY
-#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
-#endif /* CONFIG_USER_ONLY */
-
 #endif
@@ -37,7 +37,6 @@
 #include "qemu/guest-random.h"
 #include "mte_helper.h"
 
-
 static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
 {
     if (exclude == 0xffff) {
@@ -63,6 +62,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
                                   bool probe, uintptr_t ra)
 {
 #ifdef CONFIG_USER_ONLY
+    const size_t page_data_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
     uint64_t clean_ptr = useronly_clean_ptr(ptr);
     int flags = page_get_flags(clean_ptr);
     uint8_t *tags;
@@ -83,7 +83,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
         return NULL;
     }
 
-    tags = page_get_target_data(clean_ptr);
+    tags = page_get_target_data(clean_ptr, page_data_size);
 
     index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                       TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
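To make the tag indexing in the final hunk concrete, here is a worked example
of mine, assuming 4 KiB pages (TARGET_PAGE_BITS of 12): each byte of target
data holds two 4-bit tags and therefore covers 2 << LOG2_TAG_GRANULE = 32
bytes of guest memory, so extract32 drops the low five bits of the page
offset.

#include <assert.h>
#include <stdint.h>

enum { LOG2_TAG_GRANULE = 4, TARGET_PAGE_BITS = 12 };

/* Same semantics as QEMU's extract32(). */
static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    uint32_t ptr = 0x1040;   /* arbitrary address: page offset 0x40 */
    uint32_t index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                               TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    assert(index == 2);      /* 0x40 >> 5 == 2: third tag byte in the page */
    return 0;
}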