Mirror of https://github.com/Motorhead1991/qemu.git, synced 2025-08-05 16:53:55 -06:00
accel/tcg: Move PageDesc tree into tb-maint.c for system
Now that PageDesc is not used for user-only, and for system it is only used for tb maintenance, move the implementation into tb-maint.c, appropriately ifdefed. We have not yet eliminated all references to PageDesc for user-only, so retain a typedef to the structure without definition.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
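The retained-typedef pattern the message describes looks roughly like the sketch below; this is an illustration of the idea, not the exact contents of the QEMU headers, and the two fields shown are assumptions:

/* System mode keeps the full definition; user-only keeps only an opaque
 * typedef, so leftover references to PageDesc pointers still compile even
 * though the page tree itself is now system-only. */
#ifdef CONFIG_USER_ONLY
typedef struct PageDesc PageDesc;    /* declaration only, no definition */
#else
typedef struct PageDesc {
    QemuSpin lock;                   /* protects this page's list of TBs */
    uintptr_t first_tb;              /* head of that TB list */
} PageDesc;
#endif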
This commit is contained in:
parent 67ff2186b0
commit babcbc220b

3 changed files with 124 additions and 140 deletions
@@ -114,37 +114,8 @@ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
int v_l1_size;
int v_l1_shift;
int v_l2_levels;

void *l1_map[V_L1_MAX_SIZE];

TBContext tb_ctx;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables. */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value. */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
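page_table_config_init() above sizes a multi-level radix tree at run time: interior levels each consume V_L2_BITS bits of the page index, and whatever bits are left over form the top (L1) level, which absorbs one extra V_L2_BITS slice if it would otherwise be smaller than V_L1_MIN_BITS. The stand-alone program below redoes that arithmetic with hypothetical constants (36-bit address space, 4 KiB pages, V_L2_BITS = 10) purely to make the resulting shape concrete; the real values are target dependent.

/* Worked example of the level-size computation, with assumed constants. */
#include <assert.h>
#include <stdio.h>

#define V_L2_BITS              10           /* assumed interior-level width */
#define V_L1_MIN_BITS           4
#define V_L1_MAX_BITS          (V_L2_BITS + 3)
#define L1_MAP_ADDR_SPACE_BITS 36           /* example: 36-bit guest space */
#define TARGET_PAGE_BITS       12           /* example: 4 KiB pages */

int main(void)
{
    /* Bits left after slicing off whole V_L2_BITS-wide levels. */
    int v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    int v_l1_size = 1 << v_l1_bits;
    int v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    int v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);

    /* Prints: v_l1_size=16 v_l1_shift=20 v_l2_levels=1 */
    printf("v_l1_size=%d v_l1_shift=%d v_l2_levels=%d\n",
           v_l1_size, v_l1_shift, v_l2_levels);
    return 0;
}

With these example numbers the lookup path is a 16-entry L1 table, one interior level of 1024 pointers, and a leaf block of PageDesc entries, together covering the 24 bits of page index.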
@@ -339,72 +310,6 @@ void page_init(void)
    page_table_config_init();
}

PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1. */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = qatomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = qatomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = qatomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = qatomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
#ifndef CONFIG_USER_ONLY
            {
                int i;

                for (i = 0; i < V_L2_SIZE; i++) {
                    qemu_spin_destroy(&pd[i].lock);
                }
            }
#endif
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY
struct page_collection *
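page_find_alloc() descends that tree and fills in missing nodes without taking a lock: each slot is read with qatomic_rcu_read(), a freshly allocated node is published with qatomic_cmpxchg(), and a thread that loses the race frees its own allocation and adopts the winner's. The same publish-or-adopt step appears twice, once for the interior pointer levels and once for the leaf PageDesc block. A minimal stand-alone sketch of the idiom, using C11 atomics instead of QEMU's qatomic helpers (Node and get_or_install() are made up for illustration; allocation-failure handling is omitted):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct Node { int payload; } Node;

/* Return the node stored in *slot, installing a new one if the slot is empty.
 * A thread that loses the race frees its speculative allocation and uses the
 * winner's node instead. */
static Node *get_or_install(Node *_Atomic *slot)
{
    Node *cur = atomic_load_explicit(slot, memory_order_acquire);
    if (cur != NULL) {
        return cur;                              /* already populated */
    }

    Node *fresh = calloc(1, sizeof(*fresh));     /* speculative allocation */
    Node *expected = NULL;
    if (atomic_compare_exchange_strong(slot, &expected, fresh)) {
        return fresh;                            /* we published our node */
    }
    free(fresh);                                 /* another thread won */
    return expected;                             /* adopt its node */
}

Freeing the losing copy is safe because it was never visible to other threads; readers only ever observe NULL or the single published node.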