mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-06 17:23:56 -06:00
Fix safe_syscall_base for sparc64.
Fix host signal handling for sparc64-linux. Speedups for jump cache and work list probing. Fix for exception replays. Raise guest SIGBUS for user-only misaligned accesses. -----BEGIN PGP SIGNATURE----- iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmIFu3QdHHJpY2hhcmQu aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9tHwf+KFk9Pa9M+vhnHTZJ ZvIRs9BaSzoDYqxLlHSKmXN3w3G5PbIcbHVHXTty2o28bT0jk05T9zQn3TzMfcbl O+Yx8rygUJmbzlEQ+GaSI69pppFj8ahlS/ylfwd5MABZun2mawexEU9sqXqGCKR9 kJY8IpkZ6vqEDONcS1ZMQ+HFsNvw6LYBd567SY8g9ZsyPLWtQSqwdcuPqAJDFWCv zNe6b07IRoFVOsbtQix9Dl/ntMxk5jto+UvdEVuW2FJOeRZJRshLWF5cGHNavSgQ Culb5ALOzoxSlcZ4xfVfWtBGoFr/BNu9D0omTSmbosvXAd4HmPVxD6kV17wXV3+g G/cvew== =D+x7 -----END PGP SIGNATURE----- Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20220211' into staging Fix safe_syscall_base for sparc64. Fix host signal handling for sparc64-linux. Speedups for jump cache and work list probing. Fix for exception replays. Raise guest SIGBUS for user-only misaligned accesses. # gpg: Signature made Fri 11 Feb 2022 01:27:16 GMT # gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F # gpg: issuer "richard.henderson@linaro.org" # gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full] # Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F * remotes/rth-gitlab/tags/pull-tcg-20220211: (34 commits) tests/tcg/multiarch: Add sigbus.c tcg/sparc: Support unaligned access for user-only tcg/sparc: Add tcg_out_jmpl_const for better tail calls tcg/sparc: Use the constant pool for 64-bit constants tcg/sparc: Convert patch_reloc to return bool tcg/sparc: Improve code gen for shifted 32-bit constants tcg/sparc: Add scratch argument to tcg_out_movi_int tcg/sparc: Split out tcg_out_movi_imm32 tcg/sparc: Use tcg_out_movi_imm13 in tcg_out_addsub2_i64 tcg/mips: Support unaligned access for softmmu tcg/mips: Support unaligned access for user-only tcg/arm: Support raising sigbus for user-only tcg/arm: Reserve a register for 
guest_base tcg/arm: Support unaligned access for softmmu tcg/arm: Check alignment for ldrd and strd tcg/arm: Remove use_armv6_instructions tcg/arm: Remove use_armv5t_instructions tcg/arm: Drop support for armv4 and armv5 hosts tcg/loongarch64: Support raising sigbus for user-only tcg/tci: Support raising sigbus for user-only ... Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit
50a75ff680
36 changed files with 1561 additions and 495 deletions
68
tests/tcg/multiarch/sigbus.c
Normal file
68
tests/tcg/multiarch/sigbus.c
Normal file
|
@ -0,0 +1,68 @@
|
|||
#define _GNU_SOURCE 1
|
||||
|
||||
#include <assert.h>
|
||||
#include <stdlib.h>
|
||||
#include <signal.h>
|
||||
#include <endian.h>
|
||||
|
||||
|
||||
/* A doubleword whose interior bytes the test will read misaligned. */
unsigned long long x = 0x8877665544332211ull;

/*
 * One byte past the start of x: a deliberately misaligned pointer.
 * Declared volatile so the compiler cannot prove the alignment and
 * constant-fold or widen the access away.
 */
void * volatile p = (void *)&x + 1;

/*
 * SIGBUS handler: check that the delivered siginfo describes the
 * misaligned access (signal number, alignment code where defined,
 * faulting address), then terminate the test successfully.
 */
void sigbus(int sig, siginfo_t *info, void *ctx)
{
    assert(sig == SIGBUS);
    assert(info->si_signo == SIGBUS);
#ifdef BUS_ADRALN
    /* BUS_ADRALN is the POSIX "invalid address alignment" code. */
    assert(info->si_code == BUS_ADRALN);
#endif
    assert(info->si_addr == p);
    exit(EXIT_SUCCESS);
}
|
||||
|
||||
int main()
|
||||
{
|
||||
struct sigaction sa = {
|
||||
.sa_sigaction = sigbus,
|
||||
.sa_flags = SA_SIGINFO
|
||||
};
|
||||
int allow_fail = 0;
|
||||
int tmp;
|
||||
|
||||
tmp = sigaction(SIGBUS, &sa, NULL);
|
||||
assert(tmp == 0);
|
||||
|
||||
/*
|
||||
* Select an operation that's likely to enforce alignment.
|
||||
* On many guests that support unaligned accesses by default,
|
||||
* this is often an atomic operation.
|
||||
*/
|
||||
#if defined(__aarch64__)
|
||||
asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
|
||||
#elif defined(__alpha__)
|
||||
asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
|
||||
#elif defined(__arm__)
|
||||
asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
|
||||
#elif defined(__powerpc__)
|
||||
asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
|
||||
#elif defined(__riscv_atomic)
|
||||
asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
|
||||
#else
|
||||
/* No insn known to fault unaligned -- try for a straight load. */
|
||||
allow_fail = 1;
|
||||
tmp = *(volatile int *)p;
|
||||
#endif
|
||||
|
||||
assert(allow_fail);
|
||||
|
||||
/*
|
||||
* We didn't see a signal.
|
||||
* We might as well validate the unaligned load worked.
|
||||
*/
|
||||
if (BYTE_ORDER == LITTLE_ENDIAN) {
|
||||
assert(tmp == 0x55443322);
|
||||
} else {
|
||||
assert(tmp == 0x77665544);
|
||||
}
|
||||
return EXIT_SUCCESS;
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue