* Support for jemalloc
* qemu_mutex_lock_iothread "No such process" fix
* cutils: qemu_strto* wrappers
* iohandler.c simplification
* Many other fixes and misc patches.

And some MTTCG work (with Emilio's fixes squashed):
* Signal-free TCG kick
* Removing spinlock in favor of QemuMutex
* User-mode emulation multi-threading fixes/docs

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQEcBAABCAAGBQJV8Tk7AAoJEL/70l94x66Ds3QH/3bi0RRR2NtKIXAQrGo5tfuD
NPMu1K5Hy+/26AC6mEVNRh4kh7dPH5E4NnDGbxet1+osvmpjxAjc2JrxEybhHD0j
fkpzqynuBN6cA2Gu5GUNoKzxxTmi2RrEYigWDZqCftRXBeO2Hsr1etxJh9UoZw5H
dgpU3j/n0Q8s08jUJ1o789knZI/ckwL4oXK4u2KhSC7ZTCWhJT7Qr7c0JmiKReaF
JEYAsKkQhICVKRVmC8NxML8U58O8maBjQ62UN6nQpVaQd0Yo/6cstFTZsRrHMHL3
7A2Tyg862cMvp+1DOX3Bk02yXA+nxnzLF8kUe0rYo6llqDBDStzqyn1j9R0qeqA=
=nB06
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* Support for jemalloc
* qemu_mutex_lock_iothread "No such process" fix
* cutils: qemu_strto* wrappers
* iohandler.c simplification
* Many other fixes and misc patches.

And some MTTCG work (with Emilio's fixes squashed):
* Signal-free TCG kick
* Removing spinlock in favor of QemuMutex
* User-mode emulation multi-threading fixes/docs

# gpg: Signature made Thu 10 Sep 2015 09:03:07 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"

* remotes/bonzini/tags/for-upstream: (44 commits)
  cutils: work around platform differences in strto{l,ul,ll,ull}
  cpu-exec: fix lock hierarchy for user-mode emulation
  exec: make mmap_lock/mmap_unlock globally available
  tcg: comment on which functions have to be called with mmap_lock held
  tcg: add memory barriers in page_find_alloc accesses
  remove unused spinlock.
  replace spinlock by QemuMutex.
  cpus: remove tcg_halt_cond and tcg_cpu_thread globals
  cpus: protect work list with work_mutex
  scripts/dump-guest-memory.py: fix after RAMBlock change
  configure: Add support for jemalloc
  add macro file for coccinelle
  configure: factor out adding disas configure
  vhost-scsi: fix wrong vhost-scsi firmware path
  checkpatch: remove tests that are not relevant outside the kernel
  checkpatch: adapt some tests to QEMU
  CODING_STYLE: update mixed declaration rules
  qmp: Add example usage of strto*l() qemu wrapper
  cutils: Add qemu_strtoull() wrapper
  cutils: Add qemu_strtoll() wrapper
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
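Among the smaller items, the cutils qemu_strto*() wrappers fold strtol()'s awkward errno/endptr protocol into a single -errno return. A minimal usage sketch, assuming the wrapper semantics as they appear in later QEMU trees (0 on success; -EINVAL when nothing converts or, with a NULL endptr, when trailing characters remain; -ERANGE on overflow). parse_retry_count() is an illustrative caller, not code from this pull, and the header that declares the wrappers varies by tree:

#include "qemu/osdep.h"
#include "qemu-common.h"    /* declares qemu_strtol() in trees of this vintage */

/* Hypothetical caller: parse a decimal count, rejecting trailing junk
 * by passing a NULL endptr so the whole string must convert. */
static int parse_retry_count(const char *arg, long *count)
{
    int err = qemu_strtol(arg, NULL, 10, count);
    if (err < 0) {
        return err;    /* -EINVAL or -ERANGE; no raw errno/endptr dance */
    }
    return 0;
}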
commit a2aa09e181
56 changed files with 2049 additions and 1043 deletions
cpu-exec.c (119 changed lines)

--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -258,10 +258,10 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
     tb_free(tb);
 }
 
-static TranslationBlock *tb_find_slow(CPUState *cpu,
-                                      target_ulong pc,
-                                      target_ulong cs_base,
-                                      uint64_t flags)
+static TranslationBlock *tb_find_physical(CPUState *cpu,
+                                          target_ulong pc,
+                                          target_ulong cs_base,
+                                          uint64_t flags)
 {
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb, **ptb1;
@@ -278,8 +278,9 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
     ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
     for(;;) {
         tb = *ptb1;
-        if (!tb)
-            goto not_found;
+        if (!tb) {
+            return NULL;
+        }
         if (tb->pc == pc &&
             tb->page_addr[0] == phys_page1 &&
             tb->cs_base == cs_base &&
@@ -291,25 +292,59 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
                 virt_page2 = (pc & TARGET_PAGE_MASK) +
                              TARGET_PAGE_SIZE;
                 phys_page2 = get_page_addr_code(env, virt_page2);
-                if (tb->page_addr[1] == phys_page2)
-                    goto found;
+                if (tb->page_addr[1] == phys_page2) {
+                    break;
+                }
             } else {
-                goto found;
+                break;
             }
         }
         ptb1 = &tb->phys_hash_next;
     }
-not_found:
-   /* if no translated code available, then translate it now */
-    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
 
-found:
-    /* Move the last found TB to the head of the list */
-    if (likely(*ptb1)) {
-        *ptb1 = tb->phys_hash_next;
-        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
-        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
-    }
+    /* Move the TB to the head of the list */
+    *ptb1 = tb->phys_hash_next;
+    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
+    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
+    return tb;
+}
+
+static TranslationBlock *tb_find_slow(CPUState *cpu,
+                                      target_ulong pc,
+                                      target_ulong cs_base,
+                                      uint64_t flags)
+{
+    TranslationBlock *tb;
+
+    tb = tb_find_physical(cpu, pc, cs_base, flags);
+    if (tb) {
+        goto found;
+    }
+
+#ifdef CONFIG_USER_ONLY
+    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
+     * taken outside tb_lock. Since we're momentarily dropping
+     * tb_lock, there's a chance that our desired tb has been
+     * translated.
+     */
+    tb_unlock();
+    mmap_lock();
+    tb_lock();
+    tb = tb_find_physical(cpu, pc, cs_base, flags);
+    if (tb) {
+        mmap_unlock();
+        goto found;
+    }
+#endif
+
+    /* if no translated code available, then translate it now */
+    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
+
+#ifdef CONFIG_USER_ONLY
+    mmap_unlock();
+#endif
+
+found:
     /* we add the TB in the virtual pc hash table */
     cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
     return tb;
@@ -350,7 +385,8 @@ static void cpu_handle_debug_exception(CPUState *cpu)
 
 /* main execution loop */
 
-volatile sig_atomic_t exit_request;
+bool exit_request;
+CPUState *tcg_current_cpu;
 
 int cpu_exec(CPUState *cpu)
 {
@@ -365,9 +401,6 @@ int cpu_exec(CPUState *cpu)
     uintptr_t next_tb;
     SyncClocks sc;
 
-    /* This must be volatile so it is not trashed by longjmp() */
-    volatile bool have_tb_lock = false;
-
     if (cpu->halted) {
         if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
@@ -377,18 +410,10 @@ int cpu_exec(CPUState *cpu)
     }
 
     current_cpu = cpu;
-
-    /* As long as current_cpu is null, up to the assignment just above,
-     * requests by other threads to exit the execution loop are expected to
-     * be issued using the exit_request global. We must make sure that our
-     * evaluation of the global value is performed past the current_cpu
-     * value transition point, which requires a memory barrier as well as
-     * an instruction scheduling constraint on modern architectures. */
-    smp_mb();
-
+    atomic_mb_set(&tcg_current_cpu, cpu);
     rcu_read_lock();
 
-    if (unlikely(exit_request)) {
+    if (unlikely(atomic_mb_read(&exit_request))) {
         cpu->exit_request = 1;
     }
 
@@ -484,8 +509,7 @@ int cpu_exec(CPUState *cpu)
                     cpu->exception_index = EXCP_INTERRUPT;
                     cpu_loop_exit(cpu);
                 }
-                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
-                have_tb_lock = true;
+                tb_lock();
                 tb = tb_find_fast(cpu);
                 /* Note: we do it here to avoid a gcc bug on Mac OS X when
                    doing it in tb_find_slow */
@@ -507,20 +531,14 @@ int cpu_exec(CPUState *cpu)
                     tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                 next_tb & TB_EXIT_MASK, tb);
                 }
-                have_tb_lock = false;
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
-
-                /* cpu_interrupt might be called while translating the
-                   TB, but before it is linked into a potentially
-                   infinite loop and becomes env->current_tb. Avoid
-                   starting execution if there is a pending interrupt. */
-                cpu->current_tb = tb;
-                barrier();
+                tb_unlock();
                 if (likely(!cpu->exit_request)) {
                     trace_exec_tb(tb, tb->pc);
                     tc_ptr = tb->tc_ptr;
                     /* execute the generated code */
+                    cpu->current_tb = tb;
                     next_tb = cpu_tb_exec(cpu, tc_ptr);
+                    cpu->current_tb = NULL;
                     switch (next_tb & TB_EXIT_MASK) {
                     case TB_EXIT_REQUESTED:
                         /* Something asked us to stop executing
@@ -528,8 +546,12 @@ int cpu_exec(CPUState *cpu)
                          * loop. Whatever requested the exit will also
                          * have set something else (eg exit_request or
                          * interrupt_request) which we will handle
-                         * next time around the loop.
+                         * next time around the loop. But we need to
+                         * ensure the tcg_exit_req read in generated code
+                         * comes before the next read of cpu->exit_request
+                         * or cpu->interrupt_request.
                          */
+                        smp_rmb();
                         next_tb = 0;
                         break;
                     case TB_EXIT_ICOUNT_EXPIRED:
@@ -559,7 +581,6 @@ int cpu_exec(CPUState *cpu)
                         break;
                     }
                 }
-                cpu->current_tb = NULL;
                 /* Try to align the host and virtual clocks
                    if the guest is in advance */
                 align_clocks(&sc, cpu);
@@ -576,10 +597,7 @@ int cpu_exec(CPUState *cpu)
             x86_cpu = X86_CPU(cpu);
             env = &x86_cpu->env;
 #endif
-            if (have_tb_lock) {
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
-                have_tb_lock = false;
-            }
+            tb_lock_reset();
         }
     } /* for(;;) */
 
@@ -588,5 +606,8 @@ int cpu_exec(CPUState *cpu)
 
     /* fail safe : never use current_cpu outside cpu_exec() */
     current_cpu = NULL;
+
+    /* Does not need atomic_mb_set because a spurious wakeup is okay. */
+    atomic_set(&tcg_current_cpu, NULL);
     return ret;
 }
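The tb_lock(), tb_unlock() and tb_lock_reset() calls introduced above come from the "replace spinlock by QemuMutex." patch in this pull. A sketch of what such helpers can look like, reconstructed from the call sites in this diff rather than copied from the patch (the mutex name and the assertions are assumptions; the helpers only do work under CONFIG_USER_ONLY because system-mode TCG at this point still serializes under the iothread lock). The key idea is that the "did I take it?" flag moves out of cpu_exec() and lives next to the lock, per thread, so it survives longjmp() without a volatile local:

static QemuMutex tb_mutex;          /* illustrative name; the series keeps
                                       the lock inside tcg_ctx.tb_ctx */
#ifdef CONFIG_USER_ONLY
static __thread int have_tb_lock;   /* replaces the old volatile local */
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tb_mutex);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tb_mutex);
#endif
}

/* Error path out of cpu_exec(): after a longjmp() the thread may or may
 * not still hold tb_lock, so release it only if this thread took it. */
void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        have_tb_lock = 0;
        qemu_mutex_unlock(&tb_mutex);
    }
#endif
}

The exit_request handshake follows a similar discipline: cpu_exec() publishes itself with atomic_mb_set(&tcg_current_cpu, cpu) before reading exit_request, while a kicker is expected to store exit_request with a full barrier and then read tcg_current_cpu, so at least one side always observes the other's write — the property the deleted smp_mb() comment used to spell out by hand.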