meson: Detect atomic128 support with optimization
There is an edge condition prior to gcc13 for which optimization
is required to generate 16-byte atomic sequences.  Detect this.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 35c653c402
commit e61f1efeb7
2 changed files with 59 additions and 22 deletions
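Only the C half of the change is shown in this view; the meson.build half detects whether the host compiler can emit 16-byte atomics, retrying with optimization enabled. A sketch of the kind of C test program such a link probe would compile twice (once plain, once with -O1); the file name and exact body are illustrative, not the committed test:

/* atomic128-probe.c: illustrative configure-time link test.
 * Build twice, e.g. "cc -O0 atomic128-probe.c" and "cc -O1 atomic128-probe.c".
 * On an affected host (s390x with GCC < 13), only the optimized build
 * inlines the 16-byte atomics; the -O0 build may fail to link against
 * __atomic_load_16/__atomic_store_16.  This is a link test only and is
 * not meant to be run. */
int main(int argc, char **argv)
{
    /* Promise 16-byte alignment so the compiler may inline the atomics. */
    unsigned __int128 *p = __builtin_assume_aligned(argv, 16);

    p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
    __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
    return 0;
}

If the plain build fails and the -O1 build links, the build system can define CONFIG_ATOMIC128_OPT rather than CONFIG_ATOMIC128, which is what the first hunk below keys off.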
@@ -16,6 +16,23 @@
 #endif
 #define HAVE_al8_fast  (ATOMIC_REG_SIZE >= 8)
 
+/*
+ * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
+ * that are supported by the host, e.g. s390x.  We can force the pointer to
+ * have our known alignment with __builtin_assume_aligned, however prior to
+ * GCC 13 that was only reliable with optimization enabled.  See
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ */
+#if defined(CONFIG_ATOMIC128_OPT)
+# if !defined(__OPTIMIZE__)
+#  define ATTRIBUTE_ATOMIC128_OPT  __attribute__((optimize("O1")))
+# endif
+# define CONFIG_ATOMIC128
+#endif
+#ifndef ATTRIBUTE_ATOMIC128_OPT
+# define ATTRIBUTE_ATOMIC128_OPT
+#endif
+
 #if defined(CONFIG_ATOMIC128)
 # define HAVE_al16_fast    true
 #else
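The macro is non-empty only when the configuration needed optimization (CONFIG_ATOMIC128_OPT) and the current translation unit is built without it (__OPTIMIZE__ undefined, e.g. --enable-debug). A minimal standalone demonstration of that mechanism, with a hypothetical function name:

#include <stdio.h>

#if defined(__GNUC__) && !defined(__OPTIMIZE__)
# define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1")))
#else
# define ATTRIBUTE_ATOMIC128_OPT
#endif

/* A function carrying the attribute is compiled at -O1 even when the
 * rest of the translation unit is built at -O0. */
static int ATTRIBUTE_ATOMIC128_OPT
attributed_identity(int x)
{
    return x;
}

int main(void)
{
#ifdef __OPTIMIZE__
    puts("optimized build: the attribute expands to nothing");
#else
    puts("-O0 build: marked functions are still compiled at -O1");
#endif
    return attributed_identity(0);
}

The remaining hunks apply this attribute to each 16-byte atomic helper.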
@@ -152,7 +169,8 @@ static inline uint64_t load_atomic8(void *pv)
  *
  * Atomically load 16 aligned bytes from @pv.
  */
-static inline Int128 load_atomic16(void *pv)
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+load_atomic16(void *pv)
 {
 #ifdef CONFIG_ATOMIC128
     __uint128_t *p = __builtin_assume_aligned(pv, 16);
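The visible line is the trick the new comment documents: __builtin_assume_aligned promises 16-byte alignment even though __alignof(unsigned __int128) may be only 8 on the host. A freestanding sketch of the same idea using the plain GCC builtin (QEMU's real body goes through its Int128/Int128Alias wrappers; link with -latomic on hosts where the load is not inlined):

#include <stdio.h>
#include <stdlib.h>

static inline unsigned __int128
load_atomic16_sketch(void *pv)
{
    /* Promise the alignment the compiler cannot see from the type. */
    unsigned __int128 *p = __builtin_assume_aligned(pv, 16);
    return __atomic_load_n(p, __ATOMIC_RELAXED);
}

int main(void)
{
    unsigned __int128 *buf = aligned_alloc(16, sizeof(*buf));
    *buf = 42;
    /* Truncating cast is fine here: the stored value fits in 64 bits. */
    printf("%llu\n", (unsigned long long)load_atomic16_sketch(buf));
    free(buf);
    return 0;
}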
@@ -356,7 +374,8 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
  * cross a 16-byte boundary then the access must be 16-byte atomic,
  * otherwise the access must be 8-byte atomic.
  */
-static inline uint64_t load_atom_extract_al16_or_al8(void *pv, int s)
+static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
+load_atom_extract_al16_or_al8(void *pv, int s)
 {
 #if defined(CONFIG_ATOMIC128)
     uintptr_t pi = (uintptr_t)pv;
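The contract in the comment drives a two-way split: a value contained in one 16-byte-aligned block needs a single 16-byte atomic load, while one that crosses the boundary only needs 8-byte atomicity. A little-endian-only sketch of that selection (the committed function also handles big-endian hosts and uses QEMU's atomic wrappers):

#include <stdint.h>

/* Extract s bytes (1-8) starting at pv; little-endian hosts only. */
static inline uint64_t
extract_al16_or_al8_sketch(void *pv, int s)
{
    uintptr_t pi = (uintptr_t)pv;
    int shr = (pi & 7) * 8;   /* bit offset of pv within its 8-byte word */
    uint64_t *p8 = (uint64_t *)(pi & ~(uintptr_t)7);
    __uint128_t r;

    (void)s;   /* s affects only the big-endian shift, omitted here */

    if (pi & 8) {
        /* [pv, pv+s) may cross the 16-byte boundary at p8 + 1: only
         * 8-byte atomicity is required, so combine two adjacent
         * 8-byte atomic loads. */
        uint64_t a = __atomic_load_n(p8, __ATOMIC_RELAXED);
        uint64_t b = __atomic_load_n(p8 + 1, __ATOMIC_RELAXED);
        r = ((__uint128_t)b << 64) | a;
    } else {
        /* Fully inside one 16-byte-aligned block: a single 16-byte
         * atomic load is required. */
        __uint128_t *p16 = __builtin_assume_aligned(p8, 16);
        r = __atomic_load_n(p16, __ATOMIC_RELAXED);
    }
    return (uint64_t)(r >> shr);
}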
@@ -692,7 +711,8 @@ static inline void store_atomic8(void *pv, uint64_t val)
  *
  * Atomically store 16 aligned bytes to @pv.
  */
-static inline void store_atomic16(void *pv, Int128Alias val)
+static inline void ATTRIBUTE_ATOMIC128_OPT
+store_atomic16(void *pv, Int128Alias val)
 {
 #if defined(CONFIG_ATOMIC128)
     __uint128_t *pu = __builtin_assume_aligned(pv, 16);
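On hosts where only the 16-byte compare-and-swap is lock-free (e.g. x86-64 cmpxchg16b), a plain 16-byte store has to be synthesized from it. A sketch of that fallback shape under the same assume-aligned trick; the committed body may differ in detail:

#include <stdbool.h>

static inline void
store_atomic16_sketch(void *pv, unsigned __int128 val)
{
    unsigned __int128 *pu = __builtin_assume_aligned(pv, 16);
    unsigned __int128 old = *pu;

    /* On failure, the CAS reloads old with the current contents,
     * so the loop simply retries until the store lands. */
    while (!__atomic_compare_exchange_n(pu, &old, val, true,
                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
    }
}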
@@ -790,7 +810,8 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
  *
  * Atomically store @val to @p masked by @msk.
  */
-static void store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
+static void ATTRIBUTE_ATOMIC128_OPT
+store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
 {
 #if defined(CONFIG_ATOMIC128)
     __uint128_t *pu, old, new;
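The declared locals (pu, old, new) point at the usual read-modify-CAS shape for a masked store: recompute the merged value from the freshest old on every retry. A hedged sketch of that loop with plain __uint128_t in place of QEMU's Int128 types:

#include <stdbool.h>

/* Atomically replace the bits of *ps selected by msk with the
 * corresponding bits of val. */
static void
store_atom_insert_al16_sketch(unsigned __int128 *ps,
                              unsigned __int128 val,
                              unsigned __int128 msk)
{
    unsigned __int128 *pu = __builtin_assume_aligned(ps, 16);
    unsigned __int128 old, new;

    old = *pu;
    do {
        /* Merge: keep unmasked bits of old, take masked bits of val. */
        new = (old & ~msk) | (val & msk);
    } while (!__atomic_compare_exchange_n(pu, &old, new, true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}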