tcg: Prepare safe access to tb_flushed out of tb_lock
Ensure atomicity and ordering of the CPU's 'tb_flushed' access, in preparation for
translation block lookup outside 'tb_lock'.

This field can only be touched from another thread by tb_flush() in user mode
emulation. So the only accesses that need to be sequentially atomic are:

 * a single write in tb_flush();
 * reads/writes outside 'tb_lock'.

In the future, before MTTCG is enabled in system mode, tb_flush() must be made
safe and this field becomes unnecessary.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20160715175852.30749-5-sergey.fedorov@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
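The hunks below are the write side, in tb_flush(). What follows is a rough, standalone C11 model of the access pattern the message describes; it is not QEMU code. QEMU's atomic_mb_set() is stood in for by a sequentially consistent atomic_store(), the relaxed atomic_set() of the jump-cache entries by memory_order_relaxed stores, and struct vcpu, jmp_cache, lookup() and the sizes are invented for illustration.

/* Standalone C11 sketch of the pattern described above; not QEMU code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define JMP_CACHE_SIZE 16

struct vcpu {
    _Atomic(void *) jmp_cache[JMP_CACHE_SIZE]; /* cf. cpu->tb_jmp_cache[] */
    atomic_bool tb_flushed;                    /* cf. cpu->tb_flushed */
};

/* Flush side: clear the per-CPU cache with relaxed stores, then publish the
 * flush with the one sequentially atomic write the commit message mentions. */
static void flush_cpu(struct vcpu *cpu)
{
    for (size_t i = 0; i < JMP_CACHE_SIZE; i++) {
        atomic_store_explicit(&cpu->jmp_cache[i], NULL, memory_order_relaxed);
    }
    atomic_store(&cpu->tb_flushed, true);   /* seq-cst, like atomic_mb_set() */
}

/* Lookup side, running without the lock: check the flag before trusting
 * anything still sitting in the cache. */
static void *lookup(struct vcpu *cpu, size_t slot)
{
    if (atomic_load(&cpu->tb_flushed)) {
        return NULL;                        /* cache was flushed under us */
    }
    return atomic_load_explicit(&cpu->jmp_cache[slot], memory_order_relaxed);
}

int main(void)
{
    struct vcpu cpu;

    atomic_init(&cpu.tb_flushed, false);
    for (size_t i = 0; i < JMP_CACHE_SIZE; i++) {
        atomic_init(&cpu.jmp_cache[i], NULL);
    }
    flush_cpu(&cpu);
    printf("flushed=%d slot0=%p\n",
           atomic_load(&cpu.tb_flushed) ? 1 : 0, lookup(&cpu, 0));
    return 0;
}

Build with, e.g., cc -std=c11 sketch.c. The only point being modelled is that the flag write carries a full barrier while the cache-entry writes do not need one.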
parent 89a16b1e42
commit 118b07308a
2 changed files with 9 additions and 11 deletions
@@ -848,7 +848,6 @@ void tb_flush(CPUState *cpu)
         > tcg_ctx.code_gen_buffer_size) {
         cpu_abort(cpu, "Internal error: code buffer overflow\n");
     }
-    tcg_ctx.tb_ctx.nb_tbs = 0;
 
     CPU_FOREACH(cpu) {
         int i;
@@ -856,9 +855,10 @@ void tb_flush(CPUState *cpu)
         for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
             atomic_set(&cpu->tb_jmp_cache[i], NULL);
         }
-        cpu->tb_flushed = true;
+        atomic_mb_set(&cpu->tb_flushed, true);
     }
 
+    tcg_ctx.tb_ctx.nb_tbs = 0;
     qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
     page_flush_tb();
 
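The remainder of the diff (the second changed file, where the flag is read outside tb_lock) is not shown on this page. As a hedged illustration of why the full barrier on that single write matters, here is a small runnable two-thread demo in plain C11 atomics plus pthreads; again this is not QEMU code, the names jmp_cache, tb_flushed and SLOTS are illustrative, and atomic_mb_set() is modelled by a seq-cst store. A lookup thread that observes the flag set is guaranteed to also observe every cache slot already cleared.

/* Two-thread demo of the ordering guarantee; plain C11 + pthreads, not QEMU. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SLOTS 64

static _Atomic(void *) jmp_cache[SLOTS];   /* stand-in for cpu->tb_jmp_cache */
static atomic_bool tb_flushed;             /* stand-in for cpu->tb_flushed */
static int dummy_tb;                       /* a stale "translation block" */

static void *flusher(void *arg)
{
    (void)arg;
    for (int i = 0; i < SLOTS; i++) {
        atomic_store_explicit(&jmp_cache[i], NULL, memory_order_relaxed);
    }
    atomic_store(&tb_flushed, true);       /* publish: seq-cst, full barrier */
    return NULL;
}

static void *lookup(void *arg)
{
    (void)arg;
    while (!atomic_load(&tb_flushed)) {
        /* spin until the flush is visible */
    }
    for (int i = 0; i < SLOTS; i++) {
        /* Every slot cleared before the flag was set must be visible now. */
        assert(atomic_load_explicit(&jmp_cache[i], memory_order_relaxed) == NULL);
    }
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;

    atomic_init(&tb_flushed, false);
    for (int i = 0; i < SLOTS; i++) {
        atomic_init(&jmp_cache[i], &dummy_tb);   /* pretend the cache is full */
    }
    pthread_create(&t2, NULL, lookup, NULL);
    pthread_create(&t1, NULL, flusher, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    puts("lookup thread saw a fully flushed cache");
    return 0;
}

Build with cc -std=c11 -pthread demo.c.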