tcg: Check for overflow via highwater mark
We currently pre-compute a worst-case code size for any TB, which works out to be 122kB. Since the average TB size is near 1kB, this wastes quite a lot of storage. Instead, check for overflow between generating code for each opcode. The overhead of the check isn't measurable and wastage is minimized.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
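Note: the pattern the commit relies on is generic. Instead of reserving a worst-case amount of space up front, reserve one worst-case unit of slack at the end of the buffer and place a "highwater" pointer at its start; after emitting each variable-sized unit, a single pointer comparison detects that the slack has been entered, and the caller can flush and retry. A minimal self-contained sketch of that idea, with illustrative sizes and names (BUF_SIZE, MAX_UNIT, emit_one are not QEMU's):

    /* Sketch of the highwater-mark overflow check; sizes and names
     * are illustrative, not QEMU's actual code. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 4096
    #define MAX_UNIT 64            /* worst-case size of one emitted unit */

    static uint8_t buf[BUF_SIZE];
    /* Highwater mark: one worst-case unit below the true end, so a unit
     * that *starts* at or below the mark cannot run off the buffer. */
    static uint8_t *const highwater = buf + BUF_SIZE - MAX_UNIT;

    /* Emit one variable-sized unit; stands in for one opcode's codegen. */
    static uint8_t *emit_one(uint8_t *p, unsigned seed)
    {
        size_t n = 1 + seed % MAX_UNIT;
        memset(p, 0x90, n);
        return p + n;
    }

    /* Returns bytes emitted, or -1 on (pending) overflow. */
    static ptrdiff_t emit_all(unsigned nunits)
    {
        uint8_t *p = buf;
        for (unsigned i = 0; i < nunits; i++) {
            p = emit_one(p, i);
            /* One cheap test per unit, after the fact, instead of
             * pre-reserving nunits * MAX_UNIT bytes up front. */
            if (p > highwater) {
                return -1;         /* caller flushes and retries */
            }
        }
        return p - buf;
    }

    int main(void)
    {
        ptrdiff_t n = emit_all(200);
        if (n < 0) {
            printf("overflow: flush buffer and retry\n");
        } else {
            printf("emitted %td bytes\n", n);
        }
        return 0;
    }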
This commit is contained in:
parent f293709c6a
commit b125f9dc7b

4 changed files with 40 additions and 16 deletions
translate-all.c:

@@ -223,6 +223,7 @@ static target_long decode_sleb128(uint8_t **pp)
 static int encode_search(TranslationBlock *tb, uint8_t *block)
 {
+    uint8_t *highwater = tcg_ctx.code_gen_highwater;
     uint8_t *p = block;
     int i, j, n;
@@ -241,6 +242,14 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
         }
         prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
         p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
+
+        /* Test for (pending) buffer overflow.  The assumption is that any
+           one row beginning below the high water mark cannot overrun
+           the buffer completely.  Thus we can test for overflow after
+           encoding a row without having to check during encoding.  */
+        if (unlikely(p > highwater)) {
+            return -1;
+        }
     }

     return p - block;
@@ -756,9 +765,7 @@ static TranslationBlock *tb_alloc(target_ulong pc)
 {
     TranslationBlock *tb;

-    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
-        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
-         tcg_ctx.code_gen_buffer_max_size) {
+    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
         return NULL;
     }
     tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
@@ -1063,12 +1070,15 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     if (use_icount) {
         cflags |= CF_USE_ICOUNT;
     }

     tb = tb_alloc(pc);
-    if (!tb) {
+    if (unlikely(!tb)) {
+ buffer_overflow:
         /* flush must be done */
         tb_flush(cpu);
         /* cannot fail at this point */
         tb = tb_alloc(pc);
+        assert(tb != NULL);
         /* Don't forget to invalidate previous TB info. */
         tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
     }
@@ -1109,8 +1119,19 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tcg_ctx.code_time -= profile_getclock();
 #endif

+    /* ??? Overflow could be handled better here.  In particular, we
+       don't need to re-do gen_intermediate_code, nor should we re-do
+       the tcg optimization currently hidden inside tcg_gen_code.  All
+       that should be required is to flush the TBs, allocate a new TB,
+       re-initialize it per above, and re-do the actual code generation. */
     gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf);
+    if (unlikely(gen_code_size < 0)) {
+        goto buffer_overflow;
+    }
     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
+    if (unlikely(search_size < 0)) {
+        goto buffer_overflow;
+    }

 #ifdef CONFIG_PROFILER
     tcg_ctx.code_time += profile_getclock();
@@ -1681,7 +1702,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     cpu_fprintf(f, "Translation buffer state:\n");
     cpu_fprintf(f, "gen code size       %td/%zd\n",
                 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
-                tcg_ctx.code_gen_buffer_max_size);
+                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
     cpu_fprintf(f, "TB count            %d/%d\n",
                 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
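Note: taken together, the tb_gen_code hunks form a flush-and-retry path: if either tcg_gen_code() or encode_search() reports overflow, all TBs are flushed and generation restarts into an empty buffer, which cannot fail again for lack of space. A hedged sketch of that control flow in isolation, where gen() and flush() are illustrative placeholders for QEMU's tcg_gen_code()/encode_search() and tb_flush():

    /* Sketch of the flush-and-retry control flow the patch adds to
     * tb_gen_code(); gen() and flush() are placeholders, not QEMU APIs. */
    #include <stdbool.h>
    #include <stdio.h>

    static int attempts;

    /* Stands in for tcg_gen_code() + encode_search(); fails once to
     * exercise the retry path. */
    static bool gen(void)
    {
        return attempts++ > 0;
    }

    static void flush(void)            /* stands in for tb_flush() */
    {
        printf("buffer full: flushing all TBs\n");
    }

    int main(void)
    {
     buffer_overflow:                  /* mirrors the label in the patch */
        if (!gen()) {
            flush();
            /* After a flush the buffer is empty, so a single retry
               cannot overflow again. */
            goto buffer_overflow;
        }
        printf("code generated after %d attempt(s)\n", attempts);
        return 0;
    }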