bochs: Check catalog_size header field (CVE-2014-0143)

It should neither become negative nor allow unbounded memory
allocations. This fixes aborts in g_malloc() and an s->catalog_bitmap
buffer overflow on big endian hosts.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Author: Kevin Wolf, 2014-03-26 13:05:33 +01:00 (committed by Stefan Hajnoczi)
commit e3737b820b
parent 246f65838d
3 changed files with 35 additions and 1 deletion
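
For context on the failure mode the commit message describes: the catalog field is a 32-bit count read directly from the untrusted image header, and it is multiplied by the 4-byte entry size before being handed to g_malloc(). The standalone sketch below is not QEMU code; the value 0x40000001 and the read_le32() helper are purely illustrative. It shows how an unchecked count either produces a multi-gigabyte allocation request (g_malloc() aborts when such an allocation fails) or, if the product is computed in 32-bit arithmetic, wraps to a tiny size that later per-entry loops can overrun.

#include <stdint.h>
#include <stdio.h>

/* Illustrative little-endian decoder standing in for le32_to_cpu(). */
static uint32_t read_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* Crafted header bytes encoding a catalog of 0x40000001 entries. */
    const uint8_t catalog_field[4] = { 0x01, 0x00, 0x00, 0x40 };
    uint32_t catalog = read_le32(catalog_field);

    /* Computed in 64 bits, the request is ~4 GB. */
    printf("unchecked request: %llu bytes\n",
           (unsigned long long)catalog * 4);

    /* Computed in 32 bits, the same product wraps to 4 bytes, so a
     * loop that later touches `catalog` entries runs far past the
     * end of whatever buffer was actually allocated. */
    printf("wrapped request:   %u bytes\n", catalog * 4u);

    return 0;
}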

@@ -123,7 +123,14 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
         bs->total_sectors = le64_to_cpu(bochs.extra.redolog.disk) / 512;
     }
 
+    /* Limit to 1M entries to avoid unbounded allocation. This is what is
+     * needed for the largest image that bximage can create (~8 TB). */
     s->catalog_size = le32_to_cpu(bochs.catalog);
+    if (s->catalog_size > 0x100000) {
+        error_setg(errp, "Catalog size is too large");
+        return -EFBIG;
+    }
+
     s->catalog_bitmap = g_malloc(s->catalog_size * 4);
 
     ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_bitmap,
@@ -142,6 +149,12 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
 
     s->extent_size = le32_to_cpu(bochs.extent);
 
+    if (s->catalog_size < bs->total_sectors / s->extent_size) {
+        error_setg(errp, "Catalog size is too small for this disk size");
+        ret = -EINVAL;
+        goto fail;
+    }
+
     qemu_co_mutex_init(&s->lock);
     return 0;
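
Condensed into a standalone helper, the validation pattern the patch applies looks roughly like the sketch below. This is not the QEMU implementation: the helper name, the plain calloc(), and the errno-style returns stand in for BDRVBochsState, g_malloc() and error_setg(). In the real patch the too-small check runs later in bochs_open(), once extent_size has been read, and the extent_size == 0 guard appears here only to keep the sketch's division well-defined.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Same 1M-entry cap as the patch: enough for the largest image bximage
 * can create (~8 TB) while bounding the catalog buffer to 4 MB. */
#define BOCHS_MAX_CATALOG_ENTRIES 0x100000

/* Hypothetical helper: reject implausible catalog sizes before
 * allocating the in-memory catalog. */
static int bochs_validate_catalog(uint32_t catalog_size,
                                  uint64_t total_sectors,
                                  uint32_t extent_size,
                                  uint32_t **catalog_bitmap)
{
    if (catalog_size > BOCHS_MAX_CATALOG_ENTRIES) {
        return -EFBIG;   /* catalog claims more entries than any valid image */
    }
    if (extent_size == 0 || catalog_size < total_sectors / extent_size) {
        return -EINVAL;  /* catalog too small to describe a disk this large */
    }

    /* With the cap in place, the allocation is at most 4 MB. */
    *catalog_bitmap = calloc(catalog_size, sizeof(uint32_t));
    return *catalog_bitmap ? 0 : -ENOMEM;
}

With both checks in place, the subsequent read of the catalog from the image and any per-entry processing of s->catalog_bitmap (the big-endian overflow mentioned in the commit message) operate on a buffer whose size is bounded and consistent with the disk size advertised by the header.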