tcg: move tb_ctx.tb_phys_invalidate_count to tcg_ctx

Thereby making it per-TCGContext. Once we remove tb_lock, this will
avoid an atomic increment every time a TB is invalidated.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
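
For readers new to the pattern, here is a minimal, self-contained sketch of why
the per-context counter no longer needs an atomic increment: each vCPU thread
owns its TCGContext, so only that thread ever writes the field, and a plain
load/add published with a relaxed store is enough for concurrent readers to see
untorn values. This is not QEMU code: C11 stdatomic stands in for QEMU's
atomic_read()/atomic_set() macros, and "struct ctx" and bump_invalidate_count()
are made-up names for illustration.

    #include <stdatomic.h>
    #include <stddef.h>

    /* Hypothetical stand-in for a per-thread TCG context. */
    struct ctx {
        /* Only the owning thread ever stores to this counter. */
        atomic_size_t tb_phys_invalidate_count;
    };

    /* Called by the owning thread when it invalidates a TB.  No atomic
     * read-modify-write is needed: a relaxed load plus a relaxed store is
     * roughly what the atomic_read()/atomic_set() pair in the hunk below
     * boils down to once no other thread writes the counter. */
    static void bump_invalidate_count(struct ctx *c)
    {
        size_t cur = atomic_load_explicit(&c->tb_phys_invalidate_count,
                                          memory_order_relaxed);
        atomic_store_explicit(&c->tb_phys_invalidate_count, cur + 1,
                              memory_order_relaxed);
    }
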
@@ -1069,7 +1069,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
     /* suppress any remaining jumps to this TB */
     tb_jmp_unlink(tb);
 
-    tb_ctx.tb_phys_invalidate_count++;
+    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
+               tcg_ctx->tb_phys_invalidate_count + 1);
 }
 
 #ifdef CONFIG_SOFTMMU
@@ -1855,7 +1856,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     cpu_fprintf(f, "\nStatistics:\n");
     cpu_fprintf(f, "TB flush count %u\n",
                 atomic_read(&tb_ctx.tb_flush_count));
-    cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
+    cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
     cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
     tcg_dump_info(f, cpu_fprintf);
 }
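
The tcg_tb_phys_invalidate_count() helper used above is introduced elsewhere
in this commit (its hunk is not shown here); roughly, it folds the per-context
counters back into a single statistic for dump_exec_info(). A hedged sketch of
that kind of aggregation follows. It is illustrative only: the registry
(all_ctxs, n_ctxs) and the function name are placeholders, and C11 stdatomic
again stands in for QEMU's atomic_read().

    #include <stdatomic.h>
    #include <stddef.h>

    /* Minimal per-thread context, repeated so this sketch stands alone. */
    struct ctx {
        atomic_size_t tb_phys_invalidate_count;
    };

    /* Illustrative registry of live contexts; QEMU's real bookkeeping
     * differs and these names are placeholders. */
    static struct ctx *all_ctxs[64];
    static atomic_uint n_ctxs;

    /* Sum every per-context counter with torn-free (relaxed) loads.
     * The total is only a statistic, so a slightly stale value is fine. */
    static size_t total_tb_phys_invalidate_count(void)
    {
        unsigned int n = atomic_load_explicit(&n_ctxs, memory_order_relaxed);
        size_t total = 0;

        for (unsigned int i = 0; i < n; i++) {
            total += atomic_load_explicit(
                &all_ctxs[i]->tb_phys_invalidate_count,
                memory_order_relaxed);
        }
        return total;
    }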