accel/tcg: Use interval tree for TBs in user-only mode
Begin weaning user-only away from PageDesc.

Since, for user-only, all TB (and page) manipulation is done with a
single mutex, and there is no virtual/physical discontinuity to split
a TB across discontinuous pages, place all of the TBs into a single
IntervalTree. This makes it trivial to find all of the TBs
intersecting a range.

Retain the existing PageDesc + linked list implementation for system
mode. Move the portion of the implementation that overlaps the new
user-only code behind the common ifdef.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit a97d5d2c8b
parent bf590a67dd
4 changed files with 279 additions and 171 deletions
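What the commit message describes is simple to picture: in a user-only build every TB is keyed by the byte interval [itree.start, itree.last] that its guest code occupies, all TBs live in one global interval tree, and "find all TBs intersecting a range" becomes a single tree query. Below is a minimal sketch of that idea using the interval_tree_insert / interval_tree_iter_first / interval_tree_iter_next helpers from qemu/interval-tree.h; the tb_root variable and the sketch_* function names are illustrative, not identifiers taken from the hunks shown further down.

#include "qemu/osdep.h"
#include "qemu/interval-tree.h"
#include "exec/exec-all.h"

/* Illustrative single tree of all user-mode TBs, keyed by [start, last]. */
static IntervalTreeRoot tb_root;

/* Insert a TB once translation has fixed its size (user-only build). */
static void sketch_tb_insert(TranslationBlock *tb)
{
    tb->itree.last = tb->itree.start + tb->size - 1;
    interval_tree_insert(&tb->itree, &tb_root);
}

/* Visit every TB whose code bytes intersect [start, last]. */
static void sketch_tb_for_range(tb_page_addr_t start, tb_page_addr_t last)
{
    IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last);

    while (n) {
        TranslationBlock *tb = container_of(n, TranslationBlock, itree);
        /* Fetch the successor first, in case the caller unlinks tb. */
        IntervalTreeNode *next = interval_tree_iter_next(n, start, last);

        /* ... invalidate or otherwise act on tb ... */
        (void)tb;
        n = next;
    }
}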
@@ -24,6 +24,7 @@
 #ifdef CONFIG_TCG
 #include "exec/cpu_ldst.h"
 #endif
+#include "qemu/interval-tree.h"
 
 /* allow to see translation results - the slowdown should be negligible, so we leave it */
 #define DEBUG_DISAS
@@ -559,11 +560,20 @@ struct TranslationBlock {
 
     struct tb_tc tc;
 
-    /* first and second physical page containing code. The lower bit
-       of the pointer tells the index in page_next[].
-       The list is protected by the TB's page('s) lock(s) */
+    /*
+     * Track tb_page_addr_t intervals that intersect this TB.
+     * For user-only, the virtual addresses are always contiguous,
+     * and we use a unified interval tree. For system, we use a
+     * linked list headed in each PageDesc. Within the list, the lsb
+     * of the previous pointer tells the index of page_next[], and the
+     * list is protected by the PageDesc lock(s).
+     */
+#ifdef CONFIG_USER_ONLY
+    IntervalTreeNode itree;
+#else
     uintptr_t page_next[2];
     tb_page_addr_t page_addr[2];
+#endif
 
     /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
     QemuSpin jmp_lock;
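For the system-mode branch kept under #else, the new comment describes TBs threaded onto per-PageDesc lists through page_next[], with the low bit of each stored pointer naming which page_next[] slot applies. A tiny illustrative decode of such a tagged value, under that reading of the comment (the helper name is hypothetical and not part of this patch):

#include "qemu/osdep.h"
#include "exec/exec-all.h"

/* Split an lsb-tagged link into the TB pointer and the page_next[] index. */
static inline TranslationBlock *sketch_untag_tb(uintptr_t tagged, unsigned *slot)
{
    *slot = tagged & 1;
    return (TranslationBlock *)(tagged & ~(uintptr_t)1);
}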
@@ -619,24 +629,51 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
 
 static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
 {
+#ifdef CONFIG_USER_ONLY
+    return tb->itree.start;
+#else
     return tb->page_addr[0];
+#endif
 }
 
 static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
 {
+#ifdef CONFIG_USER_ONLY
+    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
+    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
+#else
     return tb->page_addr[1];
+#endif
 }
 
 static inline void tb_set_page_addr0(TranslationBlock *tb,
                                      tb_page_addr_t addr)
 {
+#ifdef CONFIG_USER_ONLY
+    tb->itree.start = addr;
+    /*
+     * To begin, we record an interval of one byte. When the translation
+     * loop encounters a second page, the interval will be extended to
+     * include the first byte of the second page, which is sufficient to
+     * allow tb_page_addr1() above to work properly. The final corrected
+     * interval will be set by tb_page_add() from tb->size before the
+     * node is added to the interval tree.
+     */
+    tb->itree.last = addr;
+#else
     tb->page_addr[0] = addr;
+#endif
 }
 
 static inline void tb_set_page_addr1(TranslationBlock *tb,
                                      tb_page_addr_t addr)
 {
+#ifdef CONFIG_USER_ONLY
+    /* Extend the interval to the first byte of the second page. See above. */
+    tb->itree.last = addr;
+#else
     tb->page_addr[1] = addr;
+#endif
 }
 
 /* current cflags for hashing/comparison */
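A concrete illustration of the user-only accessors above, assuming a 4 KiB target page and made-up addresses: a TB that stays inside one page reports -1 for its second page, and extending the interval into the following page makes that page visible through tb_page_addr1(). The interval end written by hand here stands in for what tb_page_add() would derive from tb->size.

#include "qemu/osdep.h"
#include "exec/exec-all.h"

/* Illustrative only; requires a user-only build, where tb->itree exists. */
static void sketch_page_addr_example(void)
{
    TranslationBlock tb = { 0 };

    /* 32 bytes of code entirely inside the page at 0x401000. */
    tb_set_page_addr0(&tb, 0x401234);
    tb.itree.last = 0x401253;
    g_assert(tb_page_addr0(&tb) == 0x401234);
    g_assert(tb_page_addr1(&tb) == (tb_page_addr_t)-1);

    /* Translation ran into the next page: record its first byte. */
    tb_set_page_addr1(&tb, 0x402000);
    g_assert(tb_page_addr1(&tb) == 0x402000);
}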
[Diffs of the remaining changed files are not shown.]