mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-07-27 12:23:53 -06:00
linux-user: Honor elf alignment when placing images
Most binaries don't actually depend on more than page alignment, but any binary can request it. Not honoring this was a bug. This became obvious when gdb reported "Failed to read a valid object file image from memory" when examining some vdsos which are marked as needing more than page alignment. Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
779f30a01a
commit
c81d1fafa6
1 changed file with 28 additions and 7 deletions
|
@ -3179,7 +3179,8 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
|
||||||
char **pinterp_name)
|
char **pinterp_name)
|
||||||
{
|
{
|
||||||
g_autofree struct elf_phdr *phdr = NULL;
|
g_autofree struct elf_phdr *phdr = NULL;
|
||||||
abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
|
abi_ulong load_addr, load_bias, loaddr, hiaddr, error, align;
|
||||||
|
size_t reserve_size, align_size;
|
||||||
int i, prot_exec;
|
int i, prot_exec;
|
||||||
Error *err = NULL;
|
Error *err = NULL;
|
||||||
|
|
||||||
|
@ -3263,6 +3264,9 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
|
||||||
|
|
||||||
load_addr = loaddr;
|
load_addr = loaddr;
|
||||||
|
|
||||||
|
align = pow2ceil(info->alignment);
|
||||||
|
info->alignment = align;
|
||||||
|
|
||||||
if (pinterp_name != NULL) {
|
if (pinterp_name != NULL) {
|
||||||
if (ehdr->e_type == ET_EXEC) {
|
if (ehdr->e_type == ET_EXEC) {
|
||||||
/*
|
/*
|
||||||
|
@ -3271,8 +3275,6 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
|
||||||
*/
|
*/
|
||||||
probe_guest_base(image_name, loaddr, hiaddr);
|
probe_guest_base(image_name, loaddr, hiaddr);
|
||||||
} else {
|
} else {
|
||||||
abi_ulong align;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The binary is dynamic, but we still need to
|
* The binary is dynamic, but we still need to
|
||||||
* select guest_base. In this case we pass a size.
|
* select guest_base. In this case we pass a size.
|
||||||
|
@ -3290,10 +3292,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
|
||||||
* Since we do not have complete control over the guest
|
* Since we do not have complete control over the guest
|
||||||
* address space, we prefer the kernel to choose some address
|
* address space, we prefer the kernel to choose some address
|
||||||
* rather than force the use of LOAD_ADDR via MAP_FIXED.
|
* rather than force the use of LOAD_ADDR via MAP_FIXED.
|
||||||
* But without MAP_FIXED we cannot guarantee alignment,
|
|
||||||
* only suggest it.
|
|
||||||
*/
|
*/
|
||||||
align = pow2ceil(info->alignment);
|
|
||||||
if (align) {
|
if (align) {
|
||||||
load_addr &= -align;
|
load_addr &= -align;
|
||||||
}
|
}
|
||||||
|
@ -3317,13 +3316,35 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
|
||||||
* In both cases, we will overwrite pages in this range with mappings
|
* In both cases, we will overwrite pages in this range with mappings
|
||||||
* from the executable.
|
* from the executable.
|
||||||
*/
|
*/
|
||||||
load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
|
reserve_size = (size_t)hiaddr - loaddr + 1;
|
||||||
|
align_size = reserve_size;
|
||||||
|
|
||||||
|
if (ehdr->e_type != ET_EXEC && align > qemu_real_host_page_size()) {
|
||||||
|
align_size += align - 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
load_addr = target_mmap(load_addr, align_size, PROT_NONE,
|
||||||
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
|
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
|
||||||
(ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0),
|
(ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0),
|
||||||
-1, 0);
|
-1, 0);
|
||||||
if (load_addr == -1) {
|
if (load_addr == -1) {
|
||||||
goto exit_mmap;
|
goto exit_mmap;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (align_size != reserve_size) {
|
||||||
|
abi_ulong align_addr = ROUND_UP(load_addr, align);
|
||||||
|
abi_ulong align_end = align_addr + reserve_size;
|
||||||
|
abi_ulong load_end = load_addr + align_size;
|
||||||
|
|
||||||
|
if (align_addr != load_addr) {
|
||||||
|
target_munmap(load_addr, align_addr - load_addr);
|
||||||
|
}
|
||||||
|
if (align_end != load_end) {
|
||||||
|
target_munmap(align_end, load_end - align_end);
|
||||||
|
}
|
||||||
|
load_addr = align_addr;
|
||||||
|
}
|
||||||
|
|
||||||
load_bias = load_addr - loaddr;
|
load_bias = load_addr - loaddr;
|
||||||
|
|
||||||
if (elf_is_fdpic(ehdr)) {
|
if (elf_is_fdpic(ehdr)) {
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue