Darwin patch (initial patch by Pierre d'Herbemont)

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@980 c046a42c-6fe2-441c-8c8c-71466251a162

commit 83fb7adf6c
parent 1d43a71773
14 changed files with 185 additions and 106 deletions
@@ -13,6 +13,16 @@
 #include "qemu.h"
 #include "disas.h"
 
+/* this flag is uneffective under linux too, should be deleted */
+#ifndef MAP_DENYWRITE
+#define MAP_DENYWRITE 0
+#endif
+
+/* should probably go in elf.h */
+#ifndef ELIBBAD
+#define ELIBBAD 80
+#endif
+
 #ifdef TARGET_I386
 
 #define ELF_START_MMAP 0x80000000

@@ -332,7 +342,7 @@ static void * get_free_page(void)
     /* User-space version of kernel get_free_page.  Returns a page-aligned
      * page-sized chunk of memory.
      */
-    retval = (void *)target_mmap(0, host_page_size, PROT_READ|PROT_WRITE,
+    retval = (void *)target_mmap(0, qemu_host_page_size, PROT_READ|PROT_WRITE,
                                  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 
     if((long)retval == -1) {

@@ -346,7 +356,7 @@ static void * get_free_page(void)
 
 static void free_page(void * pageaddr)
 {
-    target_munmap((unsigned long)pageaddr, host_page_size);
+    target_munmap((unsigned long)pageaddr, qemu_host_page_size);
 }
 
 /*

@@ -502,7 +512,7 @@ unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
     if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
         size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
     error = target_mmap(0,
-                        size + host_page_size,
+                        size + qemu_host_page_size,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1, 0);

@@ -511,7 +521,7 @@ unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
         exit(-1);
     }
     /* we reserve one extra page at the top of the stack as guard */
-    target_mprotect(error + size, host_page_size, PROT_NONE);
+    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
 
     stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
     p += stack_base;

@@ -562,10 +572,10 @@ static void padzero(unsigned long elf_bss)
        of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
-    if (real_host_page_size < host_page_size) {
+    if (qemu_real_host_page_size < qemu_host_page_size) {
         unsigned long end_addr, end_addr1;
-        end_addr1 = (elf_bss + real_host_page_size - 1) &
-            ~(real_host_page_size - 1);
+        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
+            ~(qemu_real_host_page_size - 1);
         end_addr = HOST_PAGE_ALIGN(elf_bss);
         if (end_addr1 < end_addr) {
             mmap((void *)end_addr1, end_addr - end_addr1,

@@ -574,9 +584,9 @@ static void padzero(unsigned long elf_bss)
         }
     }
 
-    nbyte = elf_bss & (host_page_size-1);
+    nbyte = elf_bss & (qemu_host_page_size-1);
     if (nbyte) {
-        nbyte = host_page_size - nbyte;
+        nbyte = qemu_host_page_size - nbyte;
         fpnt = (char *) elf_bss;
         do {
             *fpnt++ = 0;
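The padzero() hunks above round elf_bss up and down with the renamed qemu_host_page_size / qemu_real_host_page_size globals. A minimal, self-contained sketch of that rounding arithmetic, assuming a power-of-two page size (the 4096 value and the variable names below are illustrative, not taken from the patch):

#include <stdio.h>

int main(void)
{
    unsigned long page_size = 4096;                 /* assumed power of two */
    unsigned long page_mask = ~(page_size - 1);     /* plays the role of qemu_host_page_mask */
    unsigned long addr = 0x12345;

    /* round down to the start of the containing page */
    unsigned long down = addr & page_mask;
    /* round up to the next page boundary, as HOST_PAGE_ALIGN()-style macros do */
    unsigned long up = (addr + page_size - 1) & page_mask;

    printf("%#lx -> down %#lx, up %#lx\n", addr, down, up);
    return 0;
}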
@@ -811,7 +821,7 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
          * bss page.
          */
         padzero(elf_bss);
-        elf_bss = TARGET_ELF_PAGESTART(elf_bss + host_page_size - 1); /* What we have mapped so far */
+        elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
 
         /* Map the last of the bss segment */
         if (last_bss > elf_bss) {

@@ -1252,7 +1262,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r
            and some applications "depend" upon this behavior.
            Since we do not have the power to recompile these, we
            emulate the SVr4 behavior.  Sigh.  */
-        mapped_addr = target_mmap(0, host_page_size, PROT_READ | PROT_EXEC,
+        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                   MAP_FIXED | MAP_PRIVATE, -1, 0);
     }
 
@@ -28,6 +28,11 @@
 
 #define DEBUG_LOGFILE "/tmp/qemu.log"
 
+#ifdef __APPLE__
+#include <crt_externs.h>
+# define environ  (*_NSGetEnviron())
+#endif
+
 static const char *interp_prefix = CONFIG_QEMU_PREFIX;
 
 #if defined(__i386__) && !defined(CONFIG_STATIC)
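The __APPLE__ block added above is needed because on Darwin the environ global is not exported to dynamically linked code; <crt_externs.h> instead provides the _NSGetEnviron() accessor, and the #define makes the rest of the file use it transparently. A small standalone sketch of the same idiom (the loop and its bound are illustrative only):

#ifdef __APPLE__
#include <crt_externs.h>
#define environ (*_NSGetEnviron())
#else
extern char **environ;
#endif

#include <stdio.h>

int main(void)
{
    int i;

    /* either way, `environ` now names the environment array */
    for (i = 0; i < 3 && environ[i] != NULL; i++)
        printf("%s\n", environ[i]);
    return 0;
}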
@@ -977,9 +982,9 @@ int main(int argc, char **argv)
         } else if (!strcmp(r, "L")) {
             interp_prefix = argv[optind++];
         } else if (!strcmp(r, "p")) {
-            host_page_size = atoi(argv[optind++]);
-            if (host_page_size == 0 ||
-                (host_page_size & (host_page_size - 1)) != 0) {
+            qemu_host_page_size = atoi(argv[optind++]);
+            if (qemu_host_page_size == 0 ||
+                (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
                 fprintf(stderr, "page size must be a power of two\n");
                 exit(1);
             }
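The '-p' option parsing above rejects page sizes that are not powers of two with the classic x & (x - 1) test. A short sketch of why that test works (the function name and sample values are illustrative):

#include <stdio.h>

/* a power of two has exactly one bit set, so clearing the lowest set bit
   with x & (x - 1) leaves zero; zero itself is rejected explicitly */
static int is_power_of_two(unsigned long x)
{
    return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
    printf("%d %d %d\n", is_power_of_two(4096),   /* 1 */
                         is_power_of_two(0),      /* 0 */
                         is_power_of_two(12288)); /* 0 */
    return 0;
}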
@@ -1006,8 +1011,8 @@ int main(int argc, char **argv)
     /* Scan interp_prefix dir for replacement files. */
     init_paths(interp_prefix);
 
-    /* NOTE: we need to init the CPU at this stage to get the
-       host_page_size */
+    /* NOTE: we need to init the CPU at this stage to get
+       qemu_host_page_size */
     env = cpu_init();
 
     if (elf_exec(filename, argv+optind, environ, regs, info) != 0) {
@@ -53,7 +53,7 @@ int target_mprotect(unsigned long start, unsigned long len, int prot)
     if (len == 0)
         return 0;
 
-    host_start = start & host_page_mask;
+    host_start = start & qemu_host_page_mask;
     host_end = HOST_PAGE_ALIGN(end);
     if (start > host_start) {
         /* handle host page containing start */

@@ -61,27 +61,27 @@ int target_mprotect(unsigned long start, unsigned long len, int prot)
         for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
             prot1 |= page_get_flags(addr);
         }
-        if (host_end == host_start + host_page_size) {
+        if (host_end == host_start + qemu_host_page_size) {
             for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                 prot1 |= page_get_flags(addr);
             }
             end = host_end;
         }
-        ret = mprotect((void *)host_start, host_page_size, prot1 & PAGE_BITS);
+        ret = mprotect((void *)host_start, qemu_host_page_size, prot1 & PAGE_BITS);
         if (ret != 0)
             return ret;
-        host_start += host_page_size;
+        host_start += qemu_host_page_size;
     }
     if (end < host_end) {
         prot1 = prot;
         for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
             prot1 |= page_get_flags(addr);
         }
-        ret = mprotect((void *)(host_end - host_page_size), host_page_size,
+        ret = mprotect((void *)(host_end - qemu_host_page_size), qemu_host_page_size,
                        prot1 & PAGE_BITS);
         if (ret != 0)
             return ret;
-        host_end -= host_page_size;
+        host_end -= qemu_host_page_size;
     }
 
     /* handle the pages in the middle */
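When the emulated (target) page is smaller than the host page, a single mprotect() call necessarily covers several target pages, so target_mprotect() above ORs the flags of the neighbouring target pages into prot1 before touching a partially affected host page. A simplified sketch of that merge, using a plain array in place of QEMU's page_get_flags() (all names below are illustrative, not QEMU's):

#define TARGET_PAGE_SIZE 4096UL

/* Compute the protection to apply to the whole host page starting at
   host_start, so that target pages outside [start, end) keep their current
   permissions.  page_flags[i] stands in for page_get_flags() of the i-th
   target page inside that host page. */
static int merged_host_prot(unsigned long host_start, unsigned long host_page_size,
                            unsigned long start, unsigned long end,
                            int new_prot, const int *page_flags)
{
    unsigned long addr;
    int prot = new_prot;

    for (addr = host_start; addr < host_start + host_page_size; addr += TARGET_PAGE_SIZE) {
        if (addr < start || addr >= end)
            prot |= page_flags[(addr - host_start) / TARGET_PAGE_SIZE];
    }
    return prot;   /* what the single mprotect() call would receive */
}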
@@ -102,7 +102,7 @@ int mmap_frag(unsigned long host_start,
     unsigned long host_end, ret, addr;
     int prot1, prot_new;
 
-    host_end = host_start + host_page_size;
+    host_end = host_start + qemu_host_page_size;
 
     /* get the protection of the target pages outside the mapping */
     prot1 = 0;

@@ -113,7 +113,7 @@ int mmap_frag(unsigned long host_start,
 
     if (prot1 == 0) {
         /* no page was there, so we allocate one */
-        ret = (long)mmap((void *)host_start, host_page_size, prot,
+        ret = (long)mmap((void *)host_start, qemu_host_page_size, prot,
                          flags | MAP_ANONYMOUS, -1, 0);
         if (ret == -1)
             return ret;

@@ -130,18 +130,18 @@ int mmap_frag(unsigned long host_start,
 
         /* adjust protection to be able to read */
         if (!(prot1 & PROT_WRITE))
-            mprotect((void *)host_start, host_page_size, prot1 | PROT_WRITE);
+            mprotect((void *)host_start, qemu_host_page_size, prot1 | PROT_WRITE);
 
         /* read the corresponding file data */
         pread(fd, (void *)start, end - start, offset);
 
         /* put final protection */
         if (prot_new != (prot1 | PROT_WRITE))
-            mprotect((void *)host_start, host_page_size, prot_new);
+            mprotect((void *)host_start, qemu_host_page_size, prot_new);
     } else {
         /* just update the protection */
         if (prot_new != prot1) {
-            mprotect((void *)host_start, host_page_size, prot_new);
+            mprotect((void *)host_start, qemu_host_page_size, prot_new);
         }
     }
     return 0;
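Inside a partially mapped host page, mmap_frag() above cannot simply mmap() the file, so it temporarily makes the page writable, pread()s the file contents into place, and then applies the final protection. A reduced sketch of that sequence (the function and parameter names are illustrative, not QEMU's):

#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

static int fill_fragment(void *host_page, size_t host_page_size,
                         void *dst, size_t len, int fd, off_t offset,
                         int final_prot)
{
    /* make the host page writable so the copy can land in it */
    if (mprotect(host_page, host_page_size, PROT_READ | PROT_WRITE) != 0)
        return -1;
    /* copy the file data that could not be mapped directly */
    if (pread(fd, dst, len, offset) != (ssize_t)len)
        return -1;
    /* restore the protection the caller actually asked for */
    return mprotect(host_page, host_page_size, final_prot);
}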
@@ -188,7 +188,7 @@ long target_mmap(unsigned long start, unsigned long len, int prot,
     len = TARGET_PAGE_ALIGN(len);
     if (len == 0)
         return start;
-    host_start = start & host_page_mask;
+    host_start = start & qemu_host_page_mask;
 
     if (!(flags & MAP_FIXED)) {
 #if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__)

@@ -198,10 +198,10 @@ long target_mmap(unsigned long start, unsigned long len, int prot,
             last_start += HOST_PAGE_ALIGN(len);
         }
 #endif
-        if (host_page_size != real_host_page_size) {
+        if (qemu_host_page_size != qemu_real_host_page_size) {
             /* NOTE: this code is only for debugging with '-p' option */
             /* reserve a memory area */
-            host_len = HOST_PAGE_ALIGN(len) + host_page_size - TARGET_PAGE_SIZE;
+            host_len = HOST_PAGE_ALIGN(len) + qemu_host_page_size - TARGET_PAGE_SIZE;
             host_start = (long)mmap((void *)host_start, host_len, PROT_NONE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
             if (host_start == -1)

@@ -217,7 +217,7 @@ long target_mmap(unsigned long start, unsigned long len, int prot,
         flags |= MAP_FIXED;
     } else {
         /* if not fixed, no need to do anything */
-        host_offset = offset & host_page_mask;
+        host_offset = offset & qemu_host_page_mask;
         host_len = len + offset - host_offset;
         start = (long)mmap((void *)host_start, host_len,
                            prot, flags, fd, host_offset);

@@ -238,7 +238,7 @@ long target_mmap(unsigned long start, unsigned long len, int prot,
     /* worst case: we cannot map the file because the offset is not
        aligned, so we read it */
     if (!(flags & MAP_ANONYMOUS) &&
-        (offset & ~host_page_mask) != (start & ~host_page_mask)) {
+        (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
         /* msync() won't work here, so we return an error if write is
            possible while it is a shared mapping */
         if ((flags & MAP_TYPE) == MAP_SHARED &&

@@ -260,7 +260,7 @@ long target_mmap(unsigned long start, unsigned long len, int prot,
 
     /* handle the start of the mapping */
     if (start > host_start) {
-        if (host_end == host_start + host_page_size) {
+        if (host_end == host_start + qemu_host_page_size) {
             /* one single host page */
             ret = mmap_frag(host_start, start, end,
                             prot, flags, fd, offset);

@@ -268,21 +268,21 @@ long target_mmap(unsigned long start, unsigned long len, int prot,
                 return ret;
             goto the_end1;
         }
-        ret = mmap_frag(host_start, start, host_start + host_page_size,
+        ret = mmap_frag(host_start, start, host_start + qemu_host_page_size,
                         prot, flags, fd, offset);
         if (ret == -1)
             return ret;
-        host_start += host_page_size;
+        host_start += qemu_host_page_size;
     }
     /* handle the end of the mapping */
     if (end < host_end) {
-        ret = mmap_frag(host_end - host_page_size,
-                        host_end - host_page_size, host_end,
+        ret = mmap_frag(host_end - qemu_host_page_size,
+                        host_end - qemu_host_page_size, host_end,
                         prot, flags, fd,
-                        offset + host_end - host_page_size - start);
+                        offset + host_end - qemu_host_page_size - start);
         if (ret == -1)
             return ret;
-        host_end -= host_page_size;
+        host_end -= qemu_host_page_size;
     }
 
     /* map the middle (easier) */
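When qemu_host_page_size exceeds TARGET_PAGE_SIZE, target_mmap() above splits a request into up to three pieces: an unaligned head fragment, a host-page-aligned middle, and an unaligned tail fragment, with mmap_frag() handling the two fragments. A small sketch of how those bounds work out; the 16 KB host page and the addresses are made-up example values, and the single-host-page special case of the real code is left out:

#include <stdio.h>

int main(void)
{
    unsigned long host_page_size = 16384;                 /* e.g. running with -p 16384 */
    unsigned long host_page_mask = ~(host_page_size - 1);
    unsigned long start = 0x13000, end = 0x2a000;         /* target mapping [start, end) */

    unsigned long host_start = start & host_page_mask;
    unsigned long host_end = (end + host_page_size - 1) & host_page_mask;

    if (start > host_start) {          /* head shares its host page with other pages */
        printf("head fragment: %#lx..%#lx\n", start, host_start + host_page_size);
        host_start += host_page_size;
    }
    if (end < host_end) {              /* tail shares its host page with other pages */
        printf("tail fragment: %#lx..%#lx\n", host_end - host_page_size, end);
        host_end -= host_page_size;
    }
    if (host_start < host_end)         /* fully covered host pages can be mapped directly */
        printf("aligned middle: %#lx..%#lx\n", host_start, host_end);
    return 0;
}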
@@ -322,7 +322,7 @@ int target_munmap(unsigned long start, unsigned long len)
     if (len == 0)
         return -EINVAL;
     end = start + len;
-    host_start = start & host_page_mask;
+    host_start = start & qemu_host_page_mask;
     host_end = HOST_PAGE_ALIGN(end);
 
     if (start > host_start) {

@@ -331,14 +331,14 @@ int target_munmap(unsigned long start, unsigned long len)
         for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
             prot |= page_get_flags(addr);
         }
-        if (host_end == host_start + host_page_size) {
+        if (host_end == host_start + qemu_host_page_size) {
             for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                 prot |= page_get_flags(addr);
             }
             end = host_end;
         }
         if (prot != 0)
-            host_start += host_page_size;
+            host_start += qemu_host_page_size;
     }
     if (end < host_end) {
         prot = 0;

@@ -346,7 +346,7 @@ int target_munmap(unsigned long start, unsigned long len)
             prot |= page_get_flags(addr);
         }
         if (prot != 0)
-            host_end -= host_page_size;
+            host_end -= qemu_host_page_size;
     }
 
     /* unmap what we can */

@@ -391,7 +391,7 @@ int target_msync(unsigned long start, unsigned long len, int flags)
     if (end == start)
         return 0;
 
-    start &= host_page_mask;
+    start &= qemu_host_page_mask;
     return msync((void *)start, end - start, flags);
 }
 