Fix race conditions in new user-only vma tracking.
Add tcg backend paired register allocation.
Cleanup tcg backend function call abi.

-----BEGIN PGP SIGNATURE-----

iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmO3kZEdHHJpY2hhcmQu
aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/JpwgAj9kwpiWehGWrpQp9
rbEL+Fsx+SDhnoLVpF6nmSB1nkDqdgkdnhyRaLX9wM69bnocsGppZ5sd57J/cH3m
WiODVVbWP80WHonx5EN4htQv99TZWqVmXVl11DwOfsRUmINl4GG4kvHOOABd8hdc
39eRgGBBMyMShc6MUJiToyjEAcZPcGAiHkSW9YDGbvzhlloNWh46eLP1bdW3UJWK
UiEwPpXqg+L0V8nuuQnSFoPr5FIJmmoTeiGCRHXtvgOT7J8/6eKUESpfcKkHq1ye
dwcJQATuZip3+hyCCVveiZ86TQ81RMp9en1qw+HVzfed1Ial3Tk+tqiDqZJFm25b
GMpa5g==
=OjPl
-----END PGP SIGNATURE-----

Merge tag 'pull-tcg-20230105' of https://gitlab.com/rth7680/qemu into staging

Fix race conditions in new user-only vma tracking.
Add tcg backend paired register allocation.
Cleanup tcg backend function call abi.

# gpg: Signature made Fri 06 Jan 2023 03:12:17 GMT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20230105' of https://gitlab.com/rth7680/qemu: (47 commits)
  tests/tcg/multiarch: add vma-pthread.c
  accel/tcg: Handle false negative lookup in page_check_range
  accel/tcg: Use g_free_rcu for user-exec interval trees
  accel/tcg: Fix tb_invalidate_phys_page_unwind
  tcg: Add TCGHelperInfo argument to tcg_out_call
  tcg/aarch64: Merge tcg_out_callr into tcg_out_call
  tcg: Move ffi_cif pointer into TCGHelperInfo
  tcg: Factor init_ffi_layouts() out of tcg_context_init()
  tcg: Convert typecode_to_ffi from array to function
  tcg: Reorg function calls
  tcg: Use output_pref wrapper function
  tcg: Vary the allocation size for TCGOp
  tcg: Pass number of arguments to tcg_emit_op() / tcg_op_insert_*()
  accel/tcg/plugin: Use copy_op in append_{udata,mem}_cb
  accel/tcg/plugin: Avoid duplicate copy in copy_call
  accel/tcg/plugin: Don't search for the function pointer index
  tcg: Use TCG_CALL_ARG_EVEN for TCI special case
  tcg: Replace TCG_TARGET_EXTEND_ARGS with TCG_TARGET_CALL_ARG_I32
  tcg: Replace TCG_TARGET_CALL_ALIGN_ARGS with TCG_TARGET_CALL_ARG_I64
  tcg: Introduce TCGCallReturnKind and TCGCallArgumentKind
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit aaa90fede5

50 changed files with 2635 additions and 1763 deletions
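As context for the user-only vma tracking fixes, below is a minimal sketch (not part of this pull request; all names here are illustrative) of the pattern the new vma-pthread test stresses under qemu-user: one thread repeatedly changes a page's protection with mprotect() while another thread keeps executing code that was copied into that page.

/*
 * Minimal race-pattern sketch. On real hardware this never faults because
 * PROT_EXEC is always kept set; under qemu-user it exercises the vma/page
 * flag tracking that this series makes race-free. The real test uses
 * nop_func.h to cover multiple targets; this sketch is x86-64 only.
 */
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static const char ret_insn[] = { 0xc3 };    /* x86-64 "ret" */

static char *page;
static volatile int done;

static void *mutator(void *arg)
{
    for (int i = 0; i < 100000; i++) {
        /* Alternate between RX and RWX; the executor must never fault. */
        int prot = PROT_READ | PROT_EXEC | ((i & 1) ? PROT_WRITE : 0);
        assert(mprotect(page, getpagesize(), prot) == 0);
    }
    done = 1;
    return NULL;
}

int main(void)
{
    pthread_t t;

    page = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE | PROT_EXEC,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(page != MAP_FAILED);
    memcpy(page, ret_insn, sizeof(ret_insn));

    assert(pthread_create(&t, NULL, mutator, NULL) == 0);
    while (!done) {
        ((void (*)(void))page)();   /* execute from the page being mprotected */
    }
    assert(pthread_join(t, NULL) == 0);
    return EXIT_SUCCESS;
}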
tests/tcg/multiarch/Makefile.target
@@ -39,6 +39,9 @@ signals: LDFLAGS+=-lrt -lpthread
 munmap-pthread: CFLAGS+=-pthread
 munmap-pthread: LDFLAGS+=-pthread
 
+vma-pthread: CFLAGS+=-pthread
+vma-pthread: LDFLAGS+=-pthread
+
 # We define the runner for test-mmap after the individual
 # architectures have defined their supported pages sizes. If no
 # additional page sizes are defined we only run the default test.
tests/tcg/multiarch/munmap-pthread.c
@@ -7,21 +7,7 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
-static const char nop_func[] = {
-#if defined(__aarch64__)
-    0xc0, 0x03, 0x5f, 0xd6,     /* ret */
-#elif defined(__alpha__)
-    0x01, 0x80, 0xFA, 0x6B,     /* ret */
-#elif defined(__arm__)
-    0x1e, 0xff, 0x2f, 0xe1,     /* bx lr */
-#elif defined(__riscv)
-    0x67, 0x80, 0x00, 0x00,     /* ret */
-#elif defined(__s390__)
-    0x07, 0xfe,                 /* br %r14 */
-#elif defined(__i386__) || defined(__x86_64__)
-    0xc3,                       /* ret */
-#endif
-};
+#include "nop_func.h"
 
 static void *thread_mmap_munmap(void *arg)
 {
tests/tcg/multiarch/nop_func.h (new file, 25 lines)
@@ -0,0 +1,25 @@
/*
 * No-op functions that can be safely copied.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef NOP_FUNC_H
#define NOP_FUNC_H

static const char nop_func[] = {
#if defined(__aarch64__)
    0xc0, 0x03, 0x5f, 0xd6,     /* ret */
#elif defined(__alpha__)
    0x01, 0x80, 0xFA, 0x6B,     /* ret */
#elif defined(__arm__)
    0x1e, 0xff, 0x2f, 0xe1,     /* bx lr */
#elif defined(__riscv)
    0x67, 0x80, 0x00, 0x00,     /* ret */
#elif defined(__s390__)
    0x07, 0xfe,                 /* br %r14 */
#elif defined(__i386__) || defined(__x86_64__)
    0xc3,                       /* ret */
#endif
};

#endif
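A minimal usage sketch for the new header (not part of this commit; it assumes nop_func.h is on the include path): copy the template into writable and executable memory, then call it, mirroring what the tests do.

#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "nop_func.h"           /* assumed to be on the include path */

int main(void)
{
    size_t psz = getpagesize();
    void *p;

    if (sizeof(nop_func) == 0) {        /* architecture not covered */
        return 0;
    }
    p = mmap(NULL, psz, PROT_READ | PROT_WRITE | PROT_EXEC,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(p != MAP_FAILED);
    memcpy(p, nop_func, sizeof(nop_func));
    ((void (*)(void))p)();              /* returns immediately */
    munmap(p, psz);
    return 0;
}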
tests/tcg/multiarch/vma-pthread.c (new file, 207 lines)
@@ -0,0 +1,207 @@
/*
 * Test that VMA updates do not race.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Map a contiguous chunk of RWX memory. Split it into 8 equally sized
 * regions, each of which is guaranteed to have a certain combination of
 * protection bits set.
 *
 * Reader, writer and executor threads perform the respective operations on
 * pages, which are guaranteed to have the respective protection bit set.
 * Two mutator threads change the non-fixed protection bits randomly.
 */
#include <assert.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#include "nop_func.h"

#define PAGE_IDX_BITS 10
#define PAGE_COUNT (1 << PAGE_IDX_BITS)
#define PAGE_IDX_MASK (PAGE_COUNT - 1)
#define REGION_IDX_BITS 3
#define PAGE_IDX_R_MASK (1 << 7)
#define PAGE_IDX_W_MASK (1 << 8)
#define PAGE_IDX_X_MASK (1 << 9)
#define REGION_MASK (PAGE_IDX_R_MASK | PAGE_IDX_W_MASK | PAGE_IDX_X_MASK)
#define PAGES_PER_REGION (1 << (PAGE_IDX_BITS - REGION_IDX_BITS))

struct context {
    int pagesize;
    char *ptr;
    int dev_null_fd;
    volatile int mutator_count;
};

static void *thread_read(void *arg)
{
    struct context *ctx = arg;
    ssize_t sret;
    size_t i, j;
    int ret;

    for (i = 0; ctx->mutator_count; i++) {
        char *p;

        j = (i & PAGE_IDX_MASK) | PAGE_IDX_R_MASK;
        p = &ctx->ptr[j * ctx->pagesize];

        /* Read directly. */
        ret = memcmp(p, nop_func, sizeof(nop_func));
        if (ret != 0) {
            fprintf(stderr, "fail direct read %p\n", p);
            abort();
        }

        /* Read indirectly. */
        sret = write(ctx->dev_null_fd, p, 1);
        if (sret != 1) {
            if (sret < 0) {
                fprintf(stderr, "fail indirect read %p (%m)\n", p);
            } else {
                fprintf(stderr, "fail indirect read %p (%zd)\n", p, sret);
            }
            abort();
        }
    }

    return NULL;
}

static void *thread_write(void *arg)
{
    struct context *ctx = arg;
    struct timespec *ts;
    size_t i, j;
    int ret;

    for (i = 0; ctx->mutator_count; i++) {
        j = (i & PAGE_IDX_MASK) | PAGE_IDX_W_MASK;

        /* Write directly. */
        memcpy(&ctx->ptr[j * ctx->pagesize], nop_func, sizeof(nop_func));

        /* Write using a syscall. */
        ts = (struct timespec *)(&ctx->ptr[(j + 1) * ctx->pagesize] -
                                 sizeof(struct timespec));
        ret = clock_gettime(CLOCK_REALTIME, ts);
        if (ret != 0) {
            fprintf(stderr, "fail indirect write %p (%m)\n", ts);
            abort();
        }
    }

    return NULL;
}

static void *thread_execute(void *arg)
{
    struct context *ctx = arg;
    size_t i, j;

    for (i = 0; ctx->mutator_count; i++) {
        j = (i & PAGE_IDX_MASK) | PAGE_IDX_X_MASK;
        ((void(*)(void))&ctx->ptr[j * ctx->pagesize])();
    }

    return NULL;
}

static void *thread_mutate(void *arg)
{
    size_t i, start_idx, end_idx, page_idx, tmp;
    struct context *ctx = arg;
    unsigned int seed;
    int prot, ret;

    seed = (unsigned int)time(NULL);
    for (i = 0; i < 10000; i++) {
        start_idx = rand_r(&seed) & PAGE_IDX_MASK;
        end_idx = rand_r(&seed) & PAGE_IDX_MASK;
        if (start_idx > end_idx) {
            tmp = start_idx;
            start_idx = end_idx;
            end_idx = tmp;
        }
        prot = rand_r(&seed) & (PROT_READ | PROT_WRITE | PROT_EXEC);
        for (page_idx = start_idx & REGION_MASK; page_idx <= end_idx;
             page_idx += PAGES_PER_REGION) {
            if (page_idx & PAGE_IDX_R_MASK) {
                prot |= PROT_READ;
            }
            if (page_idx & PAGE_IDX_W_MASK) {
                /* FIXME: qemu syscalls check for both read+write. */
                prot |= PROT_WRITE | PROT_READ;
            }
            if (page_idx & PAGE_IDX_X_MASK) {
                prot |= PROT_EXEC;
            }
        }
        ret = mprotect(&ctx->ptr[start_idx * ctx->pagesize],
                       (end_idx - start_idx + 1) * ctx->pagesize, prot);
        assert(ret == 0);
    }

    __atomic_fetch_sub(&ctx->mutator_count, 1, __ATOMIC_SEQ_CST);

    return NULL;
}

int main(void)
{
    pthread_t threads[5];
    struct context ctx;
    size_t i;
    int ret;

    /* Without a template, nothing to test. */
    if (sizeof(nop_func) == 0) {
        return EXIT_SUCCESS;
    }

    /* Initialize memory chunk. */
    ctx.pagesize = getpagesize();
    ctx.ptr = mmap(NULL, PAGE_COUNT * ctx.pagesize,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(ctx.ptr != MAP_FAILED);
    for (i = 0; i < PAGE_COUNT; i++) {
        memcpy(&ctx.ptr[i * ctx.pagesize], nop_func, sizeof(nop_func));
    }
    ctx.dev_null_fd = open("/dev/null", O_WRONLY);
    assert(ctx.dev_null_fd >= 0);
    ctx.mutator_count = 2;

    /* Start threads. */
    ret = pthread_create(&threads[0], NULL, thread_read, &ctx);
    assert(ret == 0);
    ret = pthread_create(&threads[1], NULL, thread_write, &ctx);
    assert(ret == 0);
    ret = pthread_create(&threads[2], NULL, thread_execute, &ctx);
    assert(ret == 0);
    for (i = 3; i <= 4; i++) {
        ret = pthread_create(&threads[i], NULL, thread_mutate, &ctx);
        assert(ret == 0);
    }

    /* Wait for threads to stop. */
    for (i = 0; i < sizeof(threads) / sizeof(threads[0]); i++) {
        ret = pthread_join(threads[i], NULL);
        assert(ret == 0);
    }

    /* Destroy memory chunk. */
    ret = close(ctx.dev_null_fd);
    assert(ret == 0);
    ret = munmap(ctx.ptr, PAGE_COUNT * ctx.pagesize);
    assert(ret == 0);

    return EXIT_SUCCESS;
}
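For reference, a small standalone sketch (not part of the commit) of the page-index encoding used by vma-pthread.c: with PAGE_IDX_BITS = 10 there are 1024 pages, and bits 7..9 of the index select one of 8 regions of 128 pages each, each region guaranteeing a fixed subset of R/W/X.

#include <stdio.h>

#define PAGE_IDX_R_MASK (1 << 7)
#define PAGE_IDX_W_MASK (1 << 8)
#define PAGE_IDX_X_MASK (1 << 9)

int main(void)
{
    for (int region = 0; region < 8; region++) {
        int idx = region << 7;               /* first page of the region */
        printf("region %d (pages %4d..%4d): guaranteed %s%s%s\n",
               region, idx, idx + 127,
               (idx & PAGE_IDX_R_MASK) ? "R" : "-",
               (idx & PAGE_IDX_W_MASK) ? "W" : "-",
               (idx & PAGE_IDX_X_MASK) ? "X" : "-");
    }
    return 0;
}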