Mirror of https://github.com/Motorhead1991/qemu.git
* KVM error improvement from Laurent
* CONFIG_PARALLEL fix from Mirek
* Atomic/optimized dirty bitmap access from myself and Stefan
* BUILD_DIR convenience/bugfix from Peter C
* Memory leak fix from Shannon
* SMM improvements (though still TCG only) from myself and Gerd, acked by mst

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQEcBAABCAAGBQJVceAwAAoJEL/70l94x66Dyz4H/RHS/OUGo6HOwG1FZ4l8RxRl
FY+pwJqinxFyGySmMLVHEeQCsIfxgi8bOmuWblG7sdt245nhMIj2jglyEOCUA3RN
Q9qxQr6QyXBWiwK4bfB7xI1z3/mc8cVvuxjtkLaBMa16A4MXMunWCDcyhsX9/0Vw
VySgTgBbn5AyY5x58TbkB7Tl6hMZgxF0yNwU6IGQvP079dgREAL2tzR1Wk8kPC80
ltLWlrwTAzF2km5m6rmstpMeZ/XIaq3DD2LU03SyUhefMsYowGKK+7Boo4lHpVm9
XAlxflahN7VGtQuno5RpYNNSzGqSJgqu5X5JxCMnbWdPi4sX3bijQdcUhW3/0oo=
=KPIz
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* KVM error improvement from Laurent
* CONFIG_PARALLEL fix from Mirek
* Atomic/optimized dirty bitmap access from myself and Stefan
* BUILD_DIR convenience/bugfix from Peter C
* Memory leak fix from Shannon
* SMM improvements (though still TCG only) from myself and Gerd, acked by mst

# gpg: Signature made Fri Jun 5 18:45:20 2015 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (62 commits)
  update Linux headers from kvm/next
  atomics: add explicit compiler fence in __atomic memory barriers
  ich9: implement SMI_LOCK
  q35: implement TSEG
  q35: add test for SMRAM.D_LCK
  q35: implement SMRAM.D_LCK
  q35: add config space wmask for SMRAM and ESMRAMC
  q35: fix ESMRAMC default
  q35: implement high SMRAM
  hw/i386: remove smram_update
  target-i386: use memory API to implement SMRAM
  hw/i386: add a separate region that tracks the SMRAME bit
  target-i386: create a separate AddressSpace for each CPU
  vl: run "late" notifiers immediately
  qom: add object_property_add_const_link
  vl: allow full-blown QemuOpts syntax for -global
  pflash_cfi01: add secure property
  pflash_cfi01: change to new-style MMIO accessors
  pflash_cfi01: change big-endian property to BIT type
  target-i386: wake up processors that receive an SMI
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit ee09f84e6b

75 changed files with 1548 additions and 1043 deletions
@@ -22,8 +22,7 @@
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
                             target_ulong vaddr);
void tlb_unprotect_code(ram_addr_t ram_addr);
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length);
void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length);
@@ -90,11 +90,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);

#if !defined(CONFIG_USER_ONLY)
bool qemu_in_vcpu_thread(void);
void cpu_reload_memory_map(CPUState *cpu);
@@ -29,7 +29,9 @@ typedef struct MemTxAttrs {
     * "didn't specify" if necessary.
     */
    unsigned int unspecified:1;
    /* ARM/AMBA TrustZone Secure access */
    /* ARM/AMBA: TrustZone Secure access
     * x86: System Management Mode access
     */
    unsigned int secure:1;
    /* Memory access is usermode (unprivileged) */
    unsigned int user:1;
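The updated comment widens the meaning of the `secure` attribute: on ARM it marks TrustZone Secure accesses, and on x86 it now also marks System Management Mode accesses. A minimal sketch of a caller building such attributes, assuming only the MemTxAttrs type shown above (the helper name is made up for illustration):

#include "exec/memattrs.h"   /* declares MemTxAttrs, as modified above */

/* Build attributes for a CPU access made while in SMM (x86) or in the
 * Secure state (ARM).  Leaving "unspecified" at 0 means the caller has
 * deliberately filled in the attributes. */
static MemTxAttrs smm_access_attrs(void)
{
    MemTxAttrs attrs = { .secure = 1 };
    return attrs;
}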
@@ -206,8 +206,10 @@ struct MemoryListener {
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
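With this change, log_start and log_stop receive the dirty-log client mask before (old) and after (new) the update, so a listener can react only when the client it cares about actually toggles. A hedged sketch of such a callback, using only the signature declared above (the listener itself and its registration are hypothetical):

#include "exec/memory.h"     /* struct MemoryListener, DIRTY_MEMORY_* */

/* React only when VGA dirty logging is newly enabled on this section. */
static void vga_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    if ((new & ~old) & (1 << DIRTY_MEMORY_VGA)) {
        /* VGA logging was just switched on for this section. */
    }
}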
@@ -591,11 +593,23 @@ const char *memory_region_name(const MemoryRegion *mr);
/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_logging(MemoryRegion *mr);
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
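The old boolean memory_region_is_logging(mr) is split in two: a per-client query and memory_region_get_dirty_log_mask(), which returns the whole client bitmap at once. A minimal sketch of querying the new API, assuming only the prototypes and DIRTY_MEMORY_* indices shown above (the region pointer is illustrative):

#include "exec/memory.h"

static void report_logging(MemoryRegion *mr)
{
    uint8_t mask = memory_region_get_dirty_log_mask(mr);

    if (mask & (1 << DIRTY_MEMORY_MIGRATION)) {
        /* writes to this region are being tracked for migration */
    }
    if (memory_region_is_logging(mr, DIRTY_MEMORY_VGA)) {
        /* equivalent per-client query, here for the VGA client */
    }
}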
@@ -647,8 +661,7 @@ void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
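After this series, migration dirty tracking is driven globally rather than per region, so the documented contract of memory_region_set_log() narrows to the VGA client. A one-call usage sketch (the region name is hypothetical; only the prototype above is assumed):

#include "exec/memory.h"

/* framebuffer_mr is a hypothetical MemoryRegion backing a VGA framebuffer. */
static void enable_fb_dirty_tracking(MemoryRegion *framebuffer_mr)
{
    memory_region_set_log(framebuffer_mr, true, DIRTY_MEMORY_VGA);
}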
@@ -41,6 +41,9 @@ void qemu_ram_free_from_ptr(ram_addr_t addr);

int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
@@ -56,7 +59,7 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
    return next < end;
}

static inline bool cpu_physical_memory_get_clean(ram_addr_t start,
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
@@ -68,7 +71,7 @@ static inline bool cpu_physical_memory_get_clean(ram_addr_t start,
    page = start >> TARGET_PAGE_BITS;
    next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
    return next >= end;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
@@ -86,44 +89,52 @@ static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
    return !(vga && code && migration);
}

static inline bool cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                            ram_addr_t length)
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    bool vga = cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_MIGRATION);
    return vga || code || migration;
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}

static inline void cpu_physical_memory_set_dirty_range_nocode(ram_addr_t start,
                                                              ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length)
                                                        ram_addr_t length,
                                                        uint8_t mask)
{
    unsigned long end, page;
    unsigned long **d = ram_list.dirty_memory;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
    if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
    }
    xen_modified_memory(start, length);
}
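Together these helpers form the fast path for dirtying guest RAM: a caller first asks which of the requested clients still have clean pages in the range, then sets only those bitmaps, now with atomic bit operations so vCPU threads can mark memory dirty without a lock. A hedged sketch of how a write path might combine them; the function name and the assumption that the caller already knows the relevant dirty-log mask are illustrative, only the two helpers above are taken from the patch:

#include "exec/ram_addr.h"   /* helpers shown in the hunk above */

/* Mark [addr, addr + len) dirty for every client that still needs it. */
static void mark_write_dirty(ram_addr_t addr, ram_addr_t len, uint8_t log_mask)
{
    uint8_t dirty = cpu_physical_memory_range_includes_clean(addr, len,
                                                             log_mask);
    if (dirty) {
        cpu_physical_memory_set_dirty_range(addr, len, dirty);
    }
}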
@@ -149,14 +160,18 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);
                unsigned long **d = ram_list.dirty_memory;

                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                if (tcg_enabled()) {
                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                }
            }
        }
        xen_modified_memory(start, pages);
        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
@@ -171,7 +186,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                addr = page_number * TARGET_PAGE_SIZE;
                ram_addr = start + addr;
                cpu_physical_memory_set_dirty_range(ram_addr,
                                                    TARGET_PAGE_SIZE * hpratio);
                                                    TARGET_PAGE_SIZE * hpratio, clients);
            } while (c != 0);
        }
    }
@@ -179,29 +194,60 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
}
#endif /* not _WIN32 */

static inline void cpu_physical_memory_clear_dirty_range_type(ram_addr_t start,
                                                               ram_addr_t length,
                                                               unsigned client)
{
    unsigned long end, page;

    assert(client < DIRTY_MEMORY_NUM);
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_clear(ram_list.dirty_memory[client], page, end - page);
}
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                          ram_addr_t length)
{
    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_CODE);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}


void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client);
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long bits = atomic_xchg(&src[k], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

#endif
#endif
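The new cpu_physical_memory_sync_dirty_bitmap() harvests the migration dirty bitmap into a destination bitmap, using atomic_xchg() on whole words in the aligned fast path so concurrent writers are not lost, and returns how many pages became newly dirty. A hedged sketch of a migration-style caller; the per-block bitmap and the counter are illustrative, only the function above is from the patch:

#include "exec/ram_addr.h"

static uint64_t migration_dirty_pages;

/* Harvest the dirty pages of one RAM block into its local bitmap and
 * account for how many pages were newly dirtied since the last sync. */
static void sync_one_block(unsigned long *block_bmap,
                           ram_addr_t offset, ram_addr_t length)
{
    migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(block_bmap, offset, length);
}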
@@ -39,6 +39,7 @@ typedef struct ICH9LPCPMRegs {
    MemoryRegion io_smi;

    uint32_t smi_en;
    uint32_t smi_en_wmask;
    uint32_t smi_sts;

    qemu_irq irq;      /* SCI */
@@ -152,6 +152,12 @@ Object *ich9_lpc_find(void);
#define ICH9_LPC_PIRQ_ROUT_MASK                 Q35_MASK(8, 3, 0)
#define ICH9_LPC_PIRQ_ROUT_DEFAULT              0x80

#define ICH9_LPC_GEN_PMCON_1                    0xa0
#define ICH9_LPC_GEN_PMCON_1_SMI_LOCK           (1 << 4)
#define ICH9_LPC_GEN_PMCON_2                    0xa2
#define ICH9_LPC_GEN_PMCON_3                    0xa4
#define ICH9_LPC_GEN_PMCON_LOCK                 0xa6

#define ICH9_LPC_RCBA                           0xf0
#define ICH9_LPC_RCBA_BA_MASK                   Q35_MASK(32, 31, 14)
#define ICH9_LPC_RCBA_EN                        0x1
@@ -210,7 +210,6 @@ void pc_nic_init(ISABus *isa_bus, PCIBus *pci_bus);
void pc_pci_device_init(PCIBus *pci_bus);

typedef void (*cpu_set_smm_t)(int smm, void *arg);
void cpu_smm_register(cpu_set_smm_t callback, void *arg);

void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name);
@@ -86,10 +86,6 @@ typedef struct PAMMemoryRegion {
    unsigned current;
} PAMMemoryRegion;

void smram_update(MemoryRegion *smram_region, uint8_t smram,
                  uint8_t smm_enabled);
void smram_set_smm(uint8_t *host_smm_enabled, int smm, uint8_t smram,
                   MemoryRegion *smram_region);
void init_pam(DeviceState *dev, MemoryRegion *ram, MemoryRegion *system,
              MemoryRegion *pci, PAMMemoryRegion *mem, uint32_t start, uint32_t size);
void pam_update(PAMMemoryRegion *mem, int idx, uint8_t val);
@@ -52,9 +52,10 @@ typedef struct MCHPCIState {
    MemoryRegion *system_memory;
    MemoryRegion *address_space_io;
    PAMMemoryRegion pam_regions[13];
    MemoryRegion smram_region;
    MemoryRegion smram_region, open_high_smram;
    MemoryRegion smram, low_smram, high_smram;
    MemoryRegion tseg_blackhole, tseg_window;
    PcPciInfo pci_info;
    uint8_t smm_enabled;
    ram_addr_t below_4g_mem_size;
    ram_addr_t above_4g_mem_size;
    uint64_t pci_hole64_size;
@@ -127,8 +128,7 @@ typedef struct Q35PCIHost {
#define MCH_HOST_BRIDGE_PAM_MASK               ((uint8_t)0x3)

#define MCH_HOST_BRIDGE_SMRAM                  0x9d
#define MCH_HOST_BRIDGE_SMRAM_SIZE             1
#define MCH_HOST_BRIDGE_SMRAM_DEFAULT          ((uint8_t)0x2)
#define MCH_HOST_BRIDGE_SMRAM_SIZE             2
#define MCH_HOST_BRIDGE_SMRAM_D_OPEN           ((uint8_t)(1 << 6))
#define MCH_HOST_BRIDGE_SMRAM_D_CLS            ((uint8_t)(1 << 5))
#define MCH_HOST_BRIDGE_SMRAM_D_LCK            ((uint8_t)(1 << 4))
@@ -139,18 +139,36 @@ typedef struct Q35PCIHost {
#define MCH_HOST_BRIDGE_SMRAM_C_END            0xc0000
#define MCH_HOST_BRIDGE_SMRAM_C_SIZE           0x20000
#define MCH_HOST_BRIDGE_UPPER_SYSTEM_BIOS_END  0x100000
#define MCH_HOST_BRIDGE_SMRAM_DEFAULT          \
    MCH_HOST_BRIDGE_SMRAM_C_BASE_SEG
#define MCH_HOST_BRIDGE_SMRAM_WMASK            \
    (MCH_HOST_BRIDGE_SMRAM_D_OPEN |            \
     MCH_HOST_BRIDGE_SMRAM_D_CLS |             \
     MCH_HOST_BRIDGE_SMRAM_D_LCK |             \
     MCH_HOST_BRIDGE_SMRAM_G_SMRAME)
#define MCH_HOST_BRIDGE_SMRAM_WMASK_LCK        \
    MCH_HOST_BRIDGE_SMRAM_D_CLS

#define MCH_HOST_BRIDGE_ESMRAMC                0x9e
#define MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME       ((uint8_t)(1 << 6))
#define MCH_HOST_BRIDGE_ESMRAMC_E_SMERR        ((uint8_t)(1 << 5))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_CACHE       ((uint8_t)(1 << 4))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_L1          ((uint8_t)(1 << 3))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_L2          ((uint8_t)(1 << 2))
#define MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME       ((uint8_t)(1 << 7))
#define MCH_HOST_BRIDGE_ESMRAMC_E_SMERR        ((uint8_t)(1 << 6))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_CACHE       ((uint8_t)(1 << 5))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_L1          ((uint8_t)(1 << 4))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_L2          ((uint8_t)(1 << 3))
#define MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK   ((uint8_t)(0x3 << 1))
#define MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_1MB    ((uint8_t)(0x0 << 1))
#define MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_2MB    ((uint8_t)(0x1 << 1))
#define MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_8MB    ((uint8_t)(0x2 << 1))
#define MCH_HOST_BRIDGE_ESMRAMC_T_EN           ((uint8_t)1)
#define MCH_HOST_BRIDGE_ESMRAMC_DEFAULT        \
    (MCH_HOST_BRIDGE_ESMRAMC_SM_CACHE |        \
     MCH_HOST_BRIDGE_ESMRAMC_SM_L1 |           \
     MCH_HOST_BRIDGE_ESMRAMC_SM_L2)
#define MCH_HOST_BRIDGE_ESMRAMC_WMASK          \
    (MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME |        \
     MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK |    \
     MCH_HOST_BRIDGE_ESMRAMC_T_EN)
#define MCH_HOST_BRIDGE_ESMRAMC_WMASK_LCK      0

/* D1:F0 PCIE* port*/
#define MCH_PCIE_DEV                           1
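The new *_WMASK / *_WMASK_LCK macros describe which SMRAM and ESMRAMC bits the guest may still modify, and the _LCK variants take over once D_LCK has been set, freezing everything except (for SMRAM) the D_CLS bit. A small, self-contained sketch of the usual write-mask rule for a config byte; the helper name is made up, only the rule new = (old & ~wmask) | (val & wmask) is being illustrated:

#include <stdint.h>

/* Apply a guest write to a config byte guarded by a write mask: bits
 * outside the mask keep their current value, bits inside the mask take
 * the guest-supplied value. */
static uint8_t apply_wmask(uint8_t current, uint8_t guest_val, uint8_t wmask)
{
    return (uint8_t)((current & ~wmask) | (guest_val & wmask));
}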
@@ -99,7 +99,13 @@

#ifndef smp_wmb
#ifdef __ATOMIC_RELEASE
#define smp_wmb()   __atomic_thread_fence(__ATOMIC_RELEASE)
/* __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics. If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */
#define smp_wmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
#else
#define smp_wmb()   __sync_synchronize()
#endif
@@ -107,7 +113,7 @@

#ifndef smp_rmb
#ifdef __ATOMIC_ACQUIRE
#define smp_rmb()   __atomic_thread_fence(__ATOMIC_ACQUIRE)
#define smp_rmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
#else
#define smp_rmb()   __sync_synchronize()
#endif
@@ -115,7 +121,7 @@

#ifndef smp_read_barrier_depends
#ifdef __ATOMIC_CONSUME
#define smp_read_barrier_depends()   __atomic_thread_fence(__ATOMIC_CONSUME)
#define smp_read_barrier_depends()   ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
#else
#define smp_read_barrier_depends()   barrier()
#endif
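The comment in the first hunk explains why the explicit barrier() matters: __atomic_thread_fence() alone does not stop the compiler from moving plain stores across the fence. The classic pattern these macros protect is a publish/consume handshake between two threads; a hedged sketch using the macros as patched above (the payload and flag variables are illustrative):

#include "qemu/atomic.h"   /* smp_wmb()/smp_rmb() as patched above */

static int payload;
static volatile int ready;

static void producer(void)
{
    payload = 42;      /* 1: write the data                   */
    smp_wmb();         /* 2: order the data store before ...  */
    ready = 1;         /*    ... the flag that publishes it   */
}

static void consumer(void)
{
    if (ready) {       /* 1: observe the flag ...             */
        smp_rmb();     /* 2: ... then it is safe to read data */
        int v = payload;
        (void)v;
    }
}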
@@ -39,7 +39,9 @@
 * bitmap_empty(src, nbits)                     Are all bits zero in *src?
 * bitmap_full(src, nbits)                      Are all bits set in *src?
 * bitmap_set(dst, pos, nbits)                  Set specified bit area
 * bitmap_set_atomic(dst, pos, nbits)           Set specified bit area with atomic ops
 * bitmap_clear(dst, pos, nbits)                Clear specified bit area
 * bitmap_test_and_clear_atomic(dst, pos, nbits) Test and clear area
 * bitmap_find_next_zero_area(buf, len, pos, n, mask)  Find bit free area
 */
@@ -226,7 +228,9 @@ static inline int bitmap_intersects(const unsigned long *src1,
}

void bitmap_set(unsigned long *map, long i, long len);
void bitmap_set_atomic(unsigned long *map, long i, long len);
void bitmap_clear(unsigned long *map, long start, long nr);
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
unsigned long bitmap_find_next_zero_area(unsigned long *map,
                                         unsigned long size,
                                         unsigned long start,
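bitmap_set_atomic() lets several threads mark ranges in the same bitmap without a lock, and bitmap_test_and_clear_atomic() lets a harvester drain those bits and learn whether any were set; this is the pair the dirty-bitmap changes earlier in the series build on. A hedged usage sketch (the bitmap size and ranges are arbitrary, only the two prototypes above are assumed):

#include <stdbool.h>
#include "qemu/bitmap.h"

#define NBITS 1024

static unsigned long dirty[BITS_TO_LONGS(NBITS)];

/* Writer side: mark 16 pages starting at page 100 as dirty. */
static void writer(void)
{
    bitmap_set_atomic(dirty, 100, 16);
}

/* Reader side: drain the whole bitmap; returns true if anything was set. */
static bool harvest(void)
{
    return bitmap_test_and_clear_atomic(dirty, 0, NBITS);
}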
@@ -16,6 +16,7 @@
#include <assert.h>

#include "host-utils.h"
#include "atomic.h"

#define BITS_PER_BYTE           CHAR_BIT
#define BITS_PER_LONG           (sizeof (unsigned long) * BITS_PER_BYTE)
@@ -38,6 +39,19 @@ static inline void set_bit(long nr, unsigned long *addr)
    *p |= mask;
}

/**
 * set_bit_atomic - Set a bit in memory atomically
 * @nr: the bit to set
 * @addr: the address to start counting from
 */
static inline void set_bit_atomic(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);

    atomic_or(p, mask);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
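Unlike set_bit(), the new set_bit_atomic() uses atomic_or() on the containing word, so two vCPU threads dirtying pages whose bits share a word cannot lose each other's update. A hedged sketch of the difference (the bitmap is illustrative):

#include "qemu/bitops.h"

static unsigned long page_dirty[4];   /* covers 4 * BITS_PER_LONG pages */

/* Safe to call concurrently from several threads even for bits that
 * share a word; a plain set_bit() here could lose updates. */
static void mark_page_dirty(long page)
{
    set_bit_atomic(page, page_dirty);
}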
@@ -1289,6 +1289,24 @@ void object_property_add_alias(Object *obj, const char *name,
                               Object *target_obj, const char *target_name,
                               Error **errp);

/**
 * object_property_add_const_link:
 * @obj: the object to add a property to
 * @name: the name of the property
 * @target: the object to be referred by the link
 * @errp: if an error occurs, a pointer to an area to store the error
 *
 * Add an unmodifiable link for a property on an object. This function will
 * add a property of type link<TYPE> where TYPE is the type of @target.
 *
 * The caller must ensure that @target stays alive as long as
 * this property exists. In the case @target is a child of @obj,
 * this will be the case. Otherwise, the caller is responsible for
 * taking a reference.
 */
void object_property_add_const_link(Object *obj, const char *name,
                                    Object *target, Error **errp);

/**
 * object_property_set_description:
 * @obj: the object owning the property
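The documentation above spells out the lifetime rule: the const link does not take a reference, so @target must outlive the property. A hedged sketch of a device exposing a read-only link to an object it already owns; the property name and objects are illustrative, only the prototype above is assumed:

#include "qom/object.h"
#include "qapi/error.h"

/* During instance_init of a hypothetical device: expose an SMRAM region
 * it owns as a const link so other devices can find it by name.  Because
 * "smram" is a child of the device, the lifetime requirement from the
 * comment above is met automatically. */
static void expose_smram_link(Object *dev, Object *smram)
{
    object_property_add_const_link(dev, "smram", smram, &error_abort);
}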
@@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
}

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other size, if
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
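This hunk only fixes a typo ("size" to "side"), but the comment it corrects describes the virtio event-index rule: notify when event_idx falls in the half-open window (old, new_idx], computed with wrap-safe 16-bit arithmetic. A self-contained worked example of that rule; the formula matches the virtio spec, and the function here is a local restatement rather than the QEMU one:

#include <stdint.h>
#include <stdio.h>

/* Standard virtio event-index test: fire if event_idx is in (old, new_idx]. */
static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
    /* The other side asked to be notified once index 3 is used. */
    printf("%d\n", need_event(3, 4, 3));   /* 1: 3 is in (3, 4]   -> notify */
    printf("%d\n", need_event(5, 4, 3));   /* 0: 5 not reached yet -> skip  */
    return 0;
}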
@@ -241,10 +241,6 @@ void dpy_text_resize(QemuConsole *con, int w, int h);
void dpy_mouse_set(QemuConsole *con, int x, int y, int on);
void dpy_cursor_define(QemuConsole *con, QEMUCursor *cursor);
bool dpy_cursor_define_supported(QemuConsole *con);
void dpy_gfx_update_dirty(QemuConsole *con,
                          MemoryRegion *address_space,
                          uint64_t base,
                          bool invalidate);
bool dpy_gfx_check_format(QemuConsole *con,
                          pixman_format_code_t format);