tcg: Prepare safe tb_jmp_cache lookup out of tb_lock

Ensure atomic access to the CPU's 'tb_jmp_cache' in preparation for
performing translation block lookups outside of 'tb_lock'.

Note that this patch does *not* make the CPU's TLB invalidation safe if
it is performed from another thread while the CPU is in its execution
loop.
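
For illustration only (not part of the original patch): a minimal
standalone C11 sketch of the access pattern this change introduces.
The stdatomic.h store/load below roughly model QEMU's atomic_set()
and atomic_rcu_read() macros; the TB type, cache size, and helper
names are simplified assumptions.

    /* Sketch: lock-free publication and lookup of a jump-cache slot,
     * assuming C11 <stdatomic.h>. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    typedef struct TB { uintptr_t pc; } TB;

    #define CACHE_SIZE 4096
    static _Atomic(TB *) jmp_cache[CACHE_SIZE];

    static unsigned slot(uintptr_t pc) { return (unsigned)(pc % CACHE_SIZE); }

    /* Writer side: a single atomic pointer store cannot be torn, so a
     * concurrent reader observes either the old pointer or the new one,
     * never a mix of both. */
    static void publish(TB *tb)
    {
        atomic_store_explicit(&jmp_cache[slot(tb->pc)], tb,
                              memory_order_release);
    }

    /* Reader side: the consume load orders any dereference of the
     * returned pointer after the load itself, which is the property the
     * fast-path lookup relies on. */
    static TB *lookup(uintptr_t pc)
    {
        return atomic_load_explicit(&jmp_cache[slot(pc)],
                                    memory_order_consume);
    }

    int main(void)
    {
        static TB tb = { .pc = 0x1234 };
        publish(&tb);
        TB *hit = lookup(0x1234);
        printf("pc=0x%" PRIxPTR "\n", hit ? hit->pc : 0);
        return 0;
    }

In the patch itself the writer still runs under tb_lock; the atomics
only make the cache safely *readable* without it.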

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20160715175852.30749-4-sergey.fedorov@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Sergey Fedorov, 2016-07-15 20:58:43 +03:00 (committed by Paolo Bonzini)
parent 4b7e69509d
commit 89a16b1e42
2 changed files with 9 additions and 5 deletions

cpu-exec.c

@@ -315,7 +315,7 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
  found:
     /* we add the TB in the virtual pc hash table */
-    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
+    atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
     return tb;
 }
@@ -333,7 +333,7 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
        is executed. */
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
     tb_lock();
-    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
+    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
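
Taken together, the two hunks form a publish/validate pairing: the slow
path publishes the pointer with an atomic store, and the fast path loads
it atomically and re-checks pc, cs_base, and flags before trusting the
hit. A hedged sketch of that control flow, again in standalone C11; the
generate_tb() helper is hypothetical and stands in for the real
translation and physical-hash lookup performed under tb_lock:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct TranslationBlock {
        uintptr_t pc;
        uintptr_t cs_base;
        uint32_t flags;
    } TranslationBlock;

    #define TB_CACHE_SIZE 4096
    static _Atomic(TranslationBlock *) tb_jmp_cache[TB_CACHE_SIZE];

    static unsigned tb_hash(uintptr_t pc)
    {
        return (unsigned)(pc % TB_CACHE_SIZE);
    }

    /* Hypothetical stand-in for the translator (error handling elided). */
    static TranslationBlock *generate_tb(uintptr_t pc, uintptr_t cs_base,
                                         uint32_t flags)
    {
        TranslationBlock *tb = malloc(sizeof(*tb));
        tb->pc = pc;
        tb->cs_base = cs_base;
        tb->flags = flags;
        return tb;
    }

    /* Slow path: obtain the TB, then publish it with a single atomic
     * store, as atomic_set() does in the first hunk. */
    static TranslationBlock *tb_find_slow(uintptr_t pc, uintptr_t cs_base,
                                          uint32_t flags)
    {
        TranslationBlock *tb = generate_tb(pc, cs_base, flags);
        atomic_store_explicit(&tb_jmp_cache[tb_hash(pc)], tb,
                              memory_order_release);
        return tb;
    }

    /* Fast path: atomic load, then validate the hit before trusting it,
     * exactly as the second hunk does. */
    static TranslationBlock *tb_find_fast(uintptr_t pc, uintptr_t cs_base,
                                          uint32_t flags)
    {
        TranslationBlock *tb =
            atomic_load_explicit(&tb_jmp_cache[tb_hash(pc)],
                                 memory_order_consume);
        if (!tb || tb->pc != pc || tb->cs_base != cs_base ||
            tb->flags != flags) {
            tb = tb_find_slow(pc, cs_base, flags);
        }
        return tb;
    }

    int main(void)
    {
        TranslationBlock *a = tb_find_fast(0x400000, 0, 0); /* miss -> slow */
        TranslationBlock *b = tb_find_fast(0x400000, 0, 0); /* hit  -> fast */
        return a == b ? 0 : 1;
    }

Note the design choice visible in the diff: a stale or colliding cache
entry is harmless, because every hit is validated against pc, cs_base,
and flags before use; atomicity only has to rule out torn values, not
stale ones.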