Mirror of https://github.com/Motorhead1991/qemu.git, synced 2025-08-08 02:03:56 -06:00
Merge remote-tracking branch 'qmp/queue/qmp' into staging
* qmp/queue/qmp: (29 commits)
  Add 'query-events' command to QMP to query async events
  qapi: convert netdev_del
  qapi: convert netdev_add
  net: net_client_init(): use error_set()
  net: purge the monitor object from all init functions
  qemu-config: introduce qemu_find_opts_err()
  qemu-config: find_list(): use error_set()
  qerror: introduce QERR_INVALID_OPTION_GROUP
  qemu-option: qemu_opts_from_qdict(): use error_set()
  qemu-option: introduce qemu_opt_set_err()
  qemu-option: opt_set(): use error_set()
  qemu-option: qemu_opts_validate(): use error_set()
  qemu-option: qemu_opt_parse(): use error_set()
  qemu-option: parse_option_size(): use error_set()
  qemu-option: parse_option_bool(): use error_set()
  qemu-option: parse_option_number(): use error_set()
  qemu-option: qemu_opts_create(): use error_set()
  introduce a new monitor command 'dump-guest-memory' to dump guest's memory
  make gdb_id() generally avialable and rename it to cpu_index()
  target-i386: Add API to get note's size
  ...
This commit is contained in:
commit 349417004a
49 changed files with 2693 additions and 255 deletions
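The two files below are the x86 side of the new 'dump-guest-memory' support: arch_dump.c writes the per-CPU ELF notes and arch_memory_mapping.c walks the guest page tables. Both emit notes through a write_core_dump_function callback supplied by the dump core. As a minimal sketch of such a callback (the real typedef lives in dump.h, which is not part of this diff; the signature below is assumed from the call sites f(note, note_size, opaque)):

#include <stdio.h>

/* Assumed shape of the dump-layer output callback (not shown in this diff). */
typedef int (*write_core_dump_function)(void *buf, size_t size, void *opaque);

/* Hypothetical callback: append a buffer to the core file passed as opaque. */
static int fd_write_core_dump(void *buf, size_t size, void *opaque)
{
    FILE *fp = opaque;

    if (fwrite(buf, 1, size, fp) != size) {
        return -1; /* the note writers treat any negative return as failure */
    }
    return 0;
}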
target-i386/arch_dump.c (new file, 449 lines)
@@ -0,0 +1,449 @@
/*
 * i386 memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "cpu-all.h"
#include "dump.h"
#include "elf.h"

#ifdef TARGET_X86_64
typedef struct {
    target_ulong r15, r14, r13, r12, rbp, rbx, r11, r10;
    target_ulong r9, r8, rax, rcx, rdx, rsi, rdi, orig_rax;
    target_ulong rip, cs, eflags;
    target_ulong rsp, ss;
    target_ulong fs_base, gs_base;
    target_ulong ds, es, fs, gs;
} x86_64_user_regs_struct;

typedef struct {
    char pad1[32];
    uint32_t pid;
    char pad2[76];
    x86_64_user_regs_struct regs;
    char pad3[8];
} x86_64_elf_prstatus;

static int x86_64_write_elf64_note(write_core_dump_function f,
                                   CPUArchState *env, int id,
                                   void *opaque)
{
    x86_64_user_regs_struct regs;
    Elf64_Nhdr *note;
    char *buf;
    int descsz, note_size, name_size = 5;
    const char *name = "CORE";
    int ret;

    regs.r15 = env->regs[15];
    regs.r14 = env->regs[14];
    regs.r13 = env->regs[13];
    regs.r12 = env->regs[12];
    regs.r11 = env->regs[11];
    regs.r10 = env->regs[10];
    regs.r9 = env->regs[9];
    regs.r8 = env->regs[8];
    regs.rbp = env->regs[R_EBP];
    regs.rsp = env->regs[R_ESP];
    regs.rdi = env->regs[R_EDI];
    regs.rsi = env->regs[R_ESI];
    regs.rdx = env->regs[R_EDX];
    regs.rcx = env->regs[R_ECX];
    regs.rbx = env->regs[R_EBX];
    regs.rax = env->regs[R_EAX];
    regs.rip = env->eip;
    regs.eflags = env->eflags;

    regs.orig_rax = 0; /* FIXME */
    regs.cs = env->segs[R_CS].selector;
    regs.ss = env->segs[R_SS].selector;
    regs.fs_base = env->segs[R_FS].base;
    regs.gs_base = env->segs[R_GS].base;
    regs.ds = env->segs[R_DS].selector;
    regs.es = env->segs[R_ES].selector;
    regs.fs = env->segs[R_FS].selector;
    regs.gs = env->segs[R_GS].selector;

    descsz = sizeof(x86_64_elf_prstatus);
    note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
                 (descsz + 3) / 4) * 4;
    note = g_malloc(note_size);

    memset(note, 0, note_size);
    note->n_namesz = cpu_to_le32(name_size);
    note->n_descsz = cpu_to_le32(descsz);
    note->n_type = cpu_to_le32(NT_PRSTATUS);
    buf = (char *)note;
    buf += ((sizeof(Elf64_Nhdr) + 3) / 4) * 4;
    memcpy(buf, name, name_size);
    buf += ((name_size + 3) / 4) * 4;
    memcpy(buf + 32, &id, 4); /* pr_pid */
    buf += descsz - sizeof(x86_64_user_regs_struct) - sizeof(target_ulong);
    memcpy(buf, &regs, sizeof(x86_64_user_regs_struct));

    ret = f(note, note_size, opaque);
    g_free(note);
    if (ret < 0) {
        return -1;
    }

    return 0;
}
#endif

typedef struct {
    uint32_t ebx, ecx, edx, esi, edi, ebp, eax;
    unsigned short ds, __ds, es, __es;
    unsigned short fs, __fs, gs, __gs;
    uint32_t orig_eax, eip;
    unsigned short cs, __cs;
    uint32_t eflags, esp;
    unsigned short ss, __ss;
} x86_user_regs_struct;

typedef struct {
    char pad1[24];
    uint32_t pid;
    char pad2[44];
    x86_user_regs_struct regs;
    char pad3[4];
} x86_elf_prstatus;

static void x86_fill_elf_prstatus(x86_elf_prstatus *prstatus, CPUArchState *env,
                                  int id)
{
    memset(prstatus, 0, sizeof(x86_elf_prstatus));
    prstatus->regs.ebp = env->regs[R_EBP] & 0xffffffff;
    prstatus->regs.esp = env->regs[R_ESP] & 0xffffffff;
    prstatus->regs.edi = env->regs[R_EDI] & 0xffffffff;
    prstatus->regs.esi = env->regs[R_ESI] & 0xffffffff;
    prstatus->regs.edx = env->regs[R_EDX] & 0xffffffff;
    prstatus->regs.ecx = env->regs[R_ECX] & 0xffffffff;
    prstatus->regs.ebx = env->regs[R_EBX] & 0xffffffff;
    prstatus->regs.eax = env->regs[R_EAX] & 0xffffffff;
    prstatus->regs.eip = env->eip & 0xffffffff;
    prstatus->regs.eflags = env->eflags & 0xffffffff;

    prstatus->regs.cs = env->segs[R_CS].selector;
    prstatus->regs.ss = env->segs[R_SS].selector;
    prstatus->regs.ds = env->segs[R_DS].selector;
    prstatus->regs.es = env->segs[R_ES].selector;
    prstatus->regs.fs = env->segs[R_FS].selector;
    prstatus->regs.gs = env->segs[R_GS].selector;

    prstatus->pid = id;
}

static int x86_write_elf64_note(write_core_dump_function f, CPUArchState *env,
                                int id, void *opaque)
{
    x86_elf_prstatus prstatus;
    Elf64_Nhdr *note;
    char *buf;
    int descsz, note_size, name_size = 5;
    const char *name = "CORE";
    int ret;

    x86_fill_elf_prstatus(&prstatus, env, id);
    descsz = sizeof(x86_elf_prstatus);
    note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
                 (descsz + 3) / 4) * 4;
    note = g_malloc(note_size);

    memset(note, 0, note_size);
    note->n_namesz = cpu_to_le32(name_size);
    note->n_descsz = cpu_to_le32(descsz);
    note->n_type = cpu_to_le32(NT_PRSTATUS);
    buf = (char *)note;
    buf += ((sizeof(Elf64_Nhdr) + 3) / 4) * 4;
    memcpy(buf, name, name_size);
    buf += ((name_size + 3) / 4) * 4;
    memcpy(buf, &prstatus, sizeof(prstatus));

    ret = f(note, note_size, opaque);
    g_free(note);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

int cpu_write_elf64_note(write_core_dump_function f, CPUArchState *env,
                         int cpuid, void *opaque)
{
    int ret;
#ifdef TARGET_X86_64
    bool lma = !!(first_cpu->hflags & HF_LMA_MASK);

    if (lma) {
        ret = x86_64_write_elf64_note(f, env, cpuid, opaque);
    } else {
#endif
        ret = x86_write_elf64_note(f, env, cpuid, opaque);
#ifdef TARGET_X86_64
    }
#endif

    return ret;
}

int cpu_write_elf32_note(write_core_dump_function f, CPUArchState *env,
                         int cpuid, void *opaque)
{
    x86_elf_prstatus prstatus;
    Elf32_Nhdr *note;
    char *buf;
    int descsz, note_size, name_size = 5;
    const char *name = "CORE";
    int ret;

    x86_fill_elf_prstatus(&prstatus, env, cpuid);
    descsz = sizeof(x86_elf_prstatus);
    note_size = ((sizeof(Elf32_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
                 (descsz + 3) / 4) * 4;
    note = g_malloc(note_size);

    memset(note, 0, note_size);
    note->n_namesz = cpu_to_le32(name_size);
    note->n_descsz = cpu_to_le32(descsz);
    note->n_type = cpu_to_le32(NT_PRSTATUS);
    buf = (char *)note;
    buf += ((sizeof(Elf32_Nhdr) + 3) / 4) * 4;
    memcpy(buf, name, name_size);
    buf += ((name_size + 3) / 4) * 4;
    memcpy(buf, &prstatus, sizeof(prstatus));

    ret = f(note, note_size, opaque);
    g_free(note);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

/*
 * please count up QEMUCPUSTATE_VERSION if you have changed definition of
 * QEMUCPUState, and modify the tools using this information accordingly.
 */
#define QEMUCPUSTATE_VERSION (1)

struct QEMUCPUSegment {
    uint32_t selector;
    uint32_t limit;
    uint32_t flags;
    uint32_t pad;
    uint64_t base;
};

typedef struct QEMUCPUSegment QEMUCPUSegment;

struct QEMUCPUState {
    uint32_t version;
    uint32_t size;
    uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp;
    uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
    uint64_t rip, rflags;
    QEMUCPUSegment cs, ds, es, fs, gs, ss;
    QEMUCPUSegment ldt, tr, gdt, idt;
    uint64_t cr[5];
};

typedef struct QEMUCPUState QEMUCPUState;

static void copy_segment(QEMUCPUSegment *d, SegmentCache *s)
{
    d->pad = 0;
    d->selector = s->selector;
    d->limit = s->limit;
    d->flags = s->flags;
    d->base = s->base;
}

static void qemu_get_cpustate(QEMUCPUState *s, CPUArchState *env)
{
    memset(s, 0, sizeof(QEMUCPUState));

    s->version = QEMUCPUSTATE_VERSION;
    s->size = sizeof(QEMUCPUState);

    s->rax = env->regs[R_EAX];
    s->rbx = env->regs[R_EBX];
    s->rcx = env->regs[R_ECX];
    s->rdx = env->regs[R_EDX];
    s->rsi = env->regs[R_ESI];
    s->rdi = env->regs[R_EDI];
    s->rsp = env->regs[R_ESP];
    s->rbp = env->regs[R_EBP];
#ifdef TARGET_X86_64
    s->r8 = env->regs[8];
    s->r9 = env->regs[9];
    s->r10 = env->regs[10];
    s->r11 = env->regs[11];
    s->r12 = env->regs[12];
    s->r13 = env->regs[13];
    s->r14 = env->regs[14];
    s->r15 = env->regs[15];
#endif
    s->rip = env->eip;
    s->rflags = env->eflags;

    copy_segment(&s->cs, &env->segs[R_CS]);
    copy_segment(&s->ds, &env->segs[R_DS]);
    copy_segment(&s->es, &env->segs[R_ES]);
    copy_segment(&s->fs, &env->segs[R_FS]);
    copy_segment(&s->gs, &env->segs[R_GS]);
    copy_segment(&s->ss, &env->segs[R_SS]);
    copy_segment(&s->ldt, &env->ldt);
    copy_segment(&s->tr, &env->tr);
    copy_segment(&s->gdt, &env->gdt);
    copy_segment(&s->idt, &env->idt);

    s->cr[0] = env->cr[0];
    s->cr[1] = env->cr[1];
    s->cr[2] = env->cr[2];
    s->cr[3] = env->cr[3];
    s->cr[4] = env->cr[4];
}

static inline int cpu_write_qemu_note(write_core_dump_function f,
                                      CPUArchState *env,
                                      void *opaque,
                                      int type)
{
    QEMUCPUState state;
    Elf64_Nhdr *note64;
    Elf32_Nhdr *note32;
    void *note;
    char *buf;
    int descsz, note_size, name_size = 5, note_head_size;
    const char *name = "QEMU";
    int ret;

    qemu_get_cpustate(&state, env);

    descsz = sizeof(state);
    if (type == 0) {
        note_head_size = sizeof(Elf32_Nhdr);
    } else {
        note_head_size = sizeof(Elf64_Nhdr);
    }
    note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
                 (descsz + 3) / 4) * 4;
    note = g_malloc(note_size);

    memset(note, 0, note_size);
    if (type == 0) {
        note32 = note;
        note32->n_namesz = cpu_to_le32(name_size);
        note32->n_descsz = cpu_to_le32(descsz);
        note32->n_type = 0;
    } else {
        note64 = note;
        note64->n_namesz = cpu_to_le32(name_size);
        note64->n_descsz = cpu_to_le32(descsz);
        note64->n_type = 0;
    }
    buf = note;
    buf += ((note_head_size + 3) / 4) * 4;
    memcpy(buf, name, name_size);
    buf += ((name_size + 3) / 4) * 4;
    memcpy(buf, &state, sizeof(state));

    ret = f(note, note_size, opaque);
    g_free(note);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

int cpu_write_elf64_qemunote(write_core_dump_function f, CPUArchState *env,
                             void *opaque)
{
    return cpu_write_qemu_note(f, env, opaque, 1);
}

int cpu_write_elf32_qemunote(write_core_dump_function f, CPUArchState *env,
                             void *opaque)
{
    return cpu_write_qemu_note(f, env, opaque, 0);
}

int cpu_get_dump_info(ArchDumpInfo *info)
{
    bool lma = false;
    RAMBlock *block;

#ifdef TARGET_X86_64
    lma = !!(first_cpu->hflags & HF_LMA_MASK);
#endif

    if (lma) {
        info->d_machine = EM_X86_64;
    } else {
        info->d_machine = EM_386;
    }
    info->d_endian = ELFDATA2LSB;

    if (lma) {
        info->d_class = ELFCLASS64;
    } else {
        info->d_class = ELFCLASS32;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (block->offset + block->length > UINT_MAX) {
                /* The memory size is greater than 4G */
                info->d_class = ELFCLASS64;
                break;
            }
        }
    }

    return 0;
}

size_t cpu_get_note_size(int class, int machine, int nr_cpus)
{
    int name_size = 5; /* "CORE" or "QEMU" */
    size_t elf_note_size = 0;
    size_t qemu_note_size = 0;
    int elf_desc_size = 0;
    int qemu_desc_size = 0;
    int note_head_size;

    if (class == ELFCLASS32) {
        note_head_size = sizeof(Elf32_Nhdr);
    } else {
        note_head_size = sizeof(Elf64_Nhdr);
    }

    if (machine == EM_386) {
        elf_desc_size = sizeof(x86_elf_prstatus);
    }
#ifdef TARGET_X86_64
    else {
        elf_desc_size = sizeof(x86_64_elf_prstatus);
    }
#endif
    qemu_desc_size = sizeof(QEMUCPUState);

    elf_note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
                     (elf_desc_size + 3) / 4) * 4;
    qemu_note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
                      (qemu_desc_size + 3) / 4) * 4;

    return (elf_note_size + qemu_note_size) * nr_cpus;
}
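Throughout arch_dump.c, the expression ((x + 3) / 4) * 4 rounds each note field up to the 4-byte alignment that the ELF specification requires between a note's header, name, and descriptor. A small standalone sketch of the same arithmetic (the 12-byte header and 336-byte descriptor are assumptions: sizeof(Elf64_Nhdr) is three uint32_t fields, and x86_64_elf_prstatus works out to 336 bytes when target_ulong is 8 bytes):

#include <stdio.h>

/* Round n up to the 4-byte alignment ELF notes require. */
static int align4(int n)
{
    return ((n + 3) / 4) * 4;
}

int main(void)
{
    int nhdr = 12;  /* sizeof(Elf64_Nhdr): n_namesz, n_descsz, n_type */
    int name = 5;   /* "CORE" plus the terminating NUL */
    int desc = 336; /* sizeof(x86_64_elf_prstatus), assuming 8-byte target_ulong */

    /* Same value as note_size in x86_64_write_elf64_note(). */
    printf("note_size = %d\n", align4(nhdr) + align4(name) + align4(desc));
    return 0;
}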
target-i386/arch_memory_mapping.c (new file, 271 lines)
@@ -0,0 +1,271 @@
/*
 * i386 memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "cpu-all.h"

/* PAE Paging or IA-32e Paging */
static void walk_pte(MemoryMappingList *list, target_phys_addr_t pte_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    target_phys_addr_t pte_addr, start_paddr;
    uint64_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pte_addr = (pte_start_addr + i * 8) & a20_mask;
        pte = ldq_phys(pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x1fff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}

/* 32-bit Paging */
static void walk_pte2(MemoryMappingList *list,
                      target_phys_addr_t pte_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    target_phys_addr_t pte_addr, start_paddr;
    uint32_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pte_addr = (pte_start_addr + i * 4) & a20_mask;
        pte = ldl_phys(pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = pte & ~0xfff;
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}

/* PAE Paging or IA-32e Paging */
static void walk_pde(MemoryMappingList *list, target_phys_addr_t pde_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
    uint64_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pde_addr = (pde_start_addr + i * 8) & a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ff) << 21);
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 21);
            continue;
        }

        pte_start_addr = (pde & ~0xfff) & a20_mask;
        walk_pte(list, pte_start_addr, a20_mask, line_addr);
    }
}

/* 32-bit Paging */
static void walk_pde2(MemoryMappingList *list,
                      target_phys_addr_t pde_start_addr, int32_t a20_mask,
                      bool pse)
{
    target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
    uint32_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pde_addr = (pde_start_addr + i * 4) & a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3ff) << 22);
        if ((pde & PG_PSE_MASK) && pse) {
            /* 4 MB page */
            start_paddr = (pde & ~0x3fffff) | ((pde & 0x1fe000) << 19);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 22);
            continue;
        }

        pte_start_addr = (pde & ~0xfff) & a20_mask;
        walk_pte2(list, pte_start_addr, a20_mask, line_addr);
    }
}

/* PAE Paging */
static void walk_pdpe2(MemoryMappingList *list,
                       target_phys_addr_t pdpe_start_addr, int32_t a20_mask)
{
    target_phys_addr_t pdpe_addr, pde_start_addr;
    uint64_t pdpe;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 4; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3) << 30);
        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
        walk_pde(list, pde_start_addr, a20_mask, line_addr);
    }
}

#ifdef TARGET_X86_64
/* IA-32e Paging */
static void walk_pdpe(MemoryMappingList *list,
                      target_phys_addr_t pdpe_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    target_phys_addr_t pdpe_addr, pde_start_addr, start_paddr;
    uint64_t pdpe;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
        if (pdpe & PG_PSE_MASK) {
            /* 1 GB page */
            start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 30);
            continue;
        }

        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
        walk_pde(list, pde_start_addr, a20_mask, line_addr);
    }
}

/* IA-32e Paging */
static void walk_pml4e(MemoryMappingList *list,
                       target_phys_addr_t pml4e_start_addr, int32_t a20_mask)
{
    target_phys_addr_t pml4e_addr, pdpe_start_addr;
    uint64_t pml4e;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 512; i++) {
        pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
        pml4e = ldq_phys(pml4e_addr);
        if (!(pml4e & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
        pdpe_start_addr = (pml4e & ~0xfff) & a20_mask;
        walk_pdpe(list, pdpe_start_addr, a20_mask, line_addr);
    }
}
#endif

int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env)
{
    if (!cpu_paging_enabled(env)) {
        /* paging is disabled */
        return 0;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            target_phys_addr_t pml4e_addr;

            pml4e_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
            walk_pml4e(list, pml4e_addr, env->a20_mask);
        } else
#endif
        {
            target_phys_addr_t pdpe_addr;

            pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
            walk_pdpe2(list, pdpe_addr, env->a20_mask);
        }
    } else {
        target_phys_addr_t pde_addr;
        bool pse;

        pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
        pse = !!(env->cr[4] & CR4_PSE_MASK);
        walk_pde2(list, pde_addr, env->a20_mask, pse);
    }

    return 0;
}

bool cpu_paging_enabled(CPUArchState *env)
{
    return env->cr[0] & CR0_PG_MASK;
}
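A note on the virtual-address reconstruction above: each walker rebuilds the guest-virtual address of a mapping from table indices rather than from any stored value, so with 32-bit paging the PDE slot supplies bits 31:22 and the PTE slot bits 21:12. A standalone sketch of that split, mirroring line_addr in walk_pde2() and start_vaddr in walk_pte2() (the sample address is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t vaddr = 0xdeadb000; /* arbitrary example address */

    /* 32-bit paging: 10-bit PDE index, 10-bit PTE index, 12-bit offset. */
    uint32_t pde_index = (vaddr >> 22) & 0x3ff;
    uint32_t pte_index = (vaddr >> 12) & 0x3ff;
    uint32_t rebuilt = (pde_index << 22) | (pte_index << 12);

    printf("pde=%u pte=%u rebuilt=0x%08x\n", pde_index, pte_index, rebuilt);
    return 0;
}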