Merge tag 'pull-target-arm-20250113' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * hw/arm_sysctl: fix extracting 31th bit of val
 * hw/misc: cast rpm to uint64_t
 * tests/qtest/boot-serial-test: Improve ASM
 * target/arm: Move minor arithmetic helpers out of helper.c
 * target/arm: change default pauth algorithm to impdef

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmeFGuUZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3mFPEAChT9DR/+bNSt0Q28TsCv84
# dMMXle7c821NHTNeP/uBQ0i3aopmOJE145wMSoZza8l+EYjOdQwHpinjfu8J/rOS
# mJUgAFRcgUoH77+k0p0x1tqKi7+669TznOMOF4RyudKju5SteVyOGgLNjzJlnItq
# 3QRBiDTS+qXqAUhgQtzcuY6Xl5M2KA/cpSWYxQf/JPpZMX2c37V8AlSF/1GkLo6Z
# 3afrasXUp+U0+03Pe3Ffknzx/LtkLc2hg2LVX8CeqMLRJSA0ohkSwa/xax+2hn+G
# 9fKn92IpQOjEFw6qBTBvkerP2hr6yhDFTVFI9v+lsY4bf7tQGIE75HEGZ1EMr26b
# LCIPSQvez9exZl/usLGkUq9MWAiEkhBMy99ajwg5X4IhcbS+oyFtH2teYpt9rd9N
# 2dVS5qzErN7TCZQza9A7+bt8v5OtbJk2K8Qx9QhMFU/dIUSp0vOA3NwGu+qkciAb
# wNdoXT22Hy0czDiQ/ln3aocmwWeVZN4+AxKNoigQhor+5oIR4lMn1P7yAmsCLeL8
# AaLXJdR4aLnYugh23qzv9wf9kAbxRBMvLbsNTKGG00DYQ0xoY4pQ2CmPAJoVVxpU
# FjRydG9sC/6sMoJiOoDVpPW003VY2If8r0ObzqUd2gkw1HLf12yug+lij0LkcXKC
# Au7ycaoHiTlluNxyQjsgPg==
# =FGfo
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 13 Jan 2025 08:53:41 EST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20250113' of https://git.linaro.org/people/pmaydell/qemu-arm:
  docs/system/arm/virt: mention specific migration information
  target/arm: change default pauth algorithm to impdef
  tests/tcg/aarch64: force qarma5 for pauth-3 test
  target/arm: add new property to select pauth-qarma5
  target/arm: Move minor arithmetic helpers out of helper.c
  tests/qtest/boot-serial-test: Initialize PL011 Control register
  tests/qtest/boot-serial-test: Reorder pair of instructions in PL011 test
  tests/qtest/boot-serial-test: Reduce for() loop in PL011 tests
  tests/qtest/boot-serial-test: Improve ASM comments of PL011 tests
  hw/misc: cast rpm to uint64_t
  hw/arm_sysctl: fix extracting 31th bit of val

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Commit e8aa7fdcdd by Stefan Hajnoczi, 2025-01-13 09:43:48 -05:00
17 changed files with 377 additions and 316 deletions

docs/system/arm/cpu-features.rst

@@ -219,8 +219,11 @@ Below is the list of TCG VCPU features and their descriptions.
``pauth-qarma3``
When ``pauth`` is enabled, select the architected QARMA3 algorithm.
Without either ``pauth-impdef`` or ``pauth-qarma3`` enabled,
the architected QARMA5 algorithm is used. The architected QARMA5
``pauth-qarma5``
When ``pauth`` is enabled, select the architected QARMA5 algorithm.
Without ``pauth-impdef``, ``pauth-qarma3`` or ``pauth-qarma5`` enabled,
the QEMU impdef algorithm is used. The architected QARMA5
and QARMA3 algorithms have good cryptographic properties, but can
be quite slow to emulate. The impdef algorithm used by QEMU is
non-cryptographic but significantly faster.
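
The practical upshot of the reworded documentation: with no pauth-* property given, new machine types now get the fast impdef algorithm, and a guest that specifically needs the architected QARMA5 output must ask for it. A hedged invocation sketch (machine and accelerator options are illustrative; only the -cpu flags come from this series):

    qemu-system-aarch64 -M virt -accel tcg -cpu max,pauth-qarma5=on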

docs/system/arm/virt.rst

@@ -19,6 +19,10 @@ of the 5.0 release and ``virt-5.0`` of the 5.1 release. Migration
is not guaranteed to work between different QEMU releases for
the non-versioned ``virt`` machine type.
VM migration is not guaranteed when using ``-cpu max``, as features
supported may change between QEMU versions. To ensure your VM can be
migrated, it is recommended to use another cpu model instead.
Supported devices
"""""""""""""""""

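The migration advice added above can be followed by pinning both a versioned machine type and a concrete CPU model; a hedged example (the model name is illustrative; any fixed model available on both ends works):

    qemu-system-aarch64 -M virt-9.2 -cpu neoverse-n1
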
docs/system/introduction.rst

@@ -169,7 +169,7 @@ would default to it anyway.
.. code::
-cpu max,pauth-impdef=on \
-cpu max \
-smp 4 \
-accel tcg \

hw/core/machine.c

@@ -36,7 +36,9 @@
#include "hw/virtio/virtio-iommu.h"
#include "audio/audio.h"
GlobalProperty hw_compat_9_2[] = {};
GlobalProperty hw_compat_9_2[] = {
{"arm-cpu", "backcompat-pauth-default-use-qarma5", "true"},
};
const size_t hw_compat_9_2_len = G_N_ELEMENTS(hw_compat_9_2);
GlobalProperty hw_compat_9_1[] = {
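
This compat entry makes boards instantiated with a 9.2-or-older machine type start every arm-cpu device with the old QARMA5 default, so existing guests keep their pointer-auth behaviour across the default change. Assuming the standard -global syntax (not part of this patch), the same property can be forced by hand:

    qemu-system-aarch64 -M virt -global arm-cpu.backcompat-pauth-default-use-qarma5=true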

hw/misc/arm_sysctl.c

@@ -520,7 +520,7 @@ static void arm_sysctl_write(void *opaque, hwaddr offset,
* as zero.
*/
s->sys_cfgctrl = val & ~((3 << 18) | (1 << 31));
if (val & (1 << 31)) {
if (extract64(val, 31, 1)) {
/* Start bit set -- actually do something */
unsigned int dcc = extract32(s->sys_cfgctrl, 26, 4);
unsigned int function = extract32(s->sys_cfgctrl, 20, 6);
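
The bug being fixed deserves a closer look: val is 64 bits wide here, while 1 << 31 is a signed 32-bit int. That shift overflows a signed int (undefined in ISO C; in practice it yields INT_MIN), and widening the negative value to 64 bits sign-extends it, so the mask covers bits 31-63 rather than bit 31 alone. A self-contained sketch of the resulting false positive (illustrative code, not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t val = 1ULL << 40;       /* bit 31 is clear */

    /* 1 << 31 is INT_MIN in practice; widening to uint64_t
     * sign-extends it to 0xffffffff80000000. */
    uint64_t bad_mask  = 1 << 31;
    uint64_t good_mask = 1ULL << 31; /* the intended mask */

    printf("bad:  %d\n", (val & bad_mask)  != 0);  /* prints 1: wrong */
    printf("good: %d\n", (val & good_mask) != 0);  /* prints 0: right */
    return 0;
}

Using extract64(val, 31, 1) sidesteps this whole class of problem by doing the masking in 64-bit unsigned arithmetic.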

hw/misc/npcm7xx_mft.c

@@ -172,8 +172,9 @@ static NPCM7xxMFTCaptureState npcm7xx_mft_compute_cnt(
* RPM = revolution/min. The time for one revolution (in ns) is
* MINUTE_TO_NANOSECOND / RPM.
*/
count = clock_ns_to_ticks(clock, (60 * NANOSECONDS_PER_SECOND) /
(rpm * NPCM7XX_MFT_PULSE_PER_REVOLUTION));
count = clock_ns_to_ticks(clock,
(uint64_t)(60 * NANOSECONDS_PER_SECOND) /
((uint64_t)rpm * NPCM7XX_MFT_PULSE_PER_REVOLUTION));
}
if (count > NPCM7XX_MFT_MAX_CNT) {
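
The cast matters because C evaluates the multiplication in the operands' own width before the division ever sees it: a uint32_t rpm times the pulse count wraps modulo 2^32. A minimal demonstration (values are illustrative, not NPCM7xx limits):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t rpm = 3000000000u;                /* hypothetically large reading */
    const uint32_t pulses_per_rev = 2;

    uint64_t wrapped = rpm * pulses_per_rev;           /* 32-bit multiply, wraps */
    uint64_t widened = (uint64_t)rpm * pulses_per_rev; /* 64-bit multiply */

    printf("wrapped: %llu\n", (unsigned long long)wrapped); /* 1705032704 */
    printf("widened: %llu\n", (unsigned long long)widened); /* 6000000000 */
    return 0;
}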

target/arm/arm-qmp-cmds.c

@@ -94,7 +94,7 @@ static const char *cpu_model_advertised_features[] = {
"sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280",
"sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
"kvm-no-adjvtime", "kvm-steal-time",
"pauth", "pauth-impdef", "pauth-qarma3",
"pauth", "pauth-impdef", "pauth-qarma3", "pauth-qarma5",
NULL
};

target/arm/cpu.c

@@ -2653,6 +2653,8 @@ static const Property arm_cpu_properties[] = {
DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
/* True to default to the backward-compat old CNTFRQ rather than 1GHz */
DEFINE_PROP_BOOL("backcompat-cntfrq", ARMCPU, backcompat_cntfrq, false),
DEFINE_PROP_BOOL("backcompat-pauth-default-use-qarma5", ARMCPU,
backcompat_pauth_default_use_qarma5, false),
};
static const gchar *arm_gdb_arch_name(CPUState *cs)

target/arm/cpu.h

@@ -972,6 +972,9 @@ struct ArchCPU {
/* QOM property to indicate we should use the back-compat CNTFRQ default */
bool backcompat_cntfrq;
/* QOM property to indicate we should use the back-compat QARMA5 default */
bool backcompat_pauth_default_use_qarma5;
/* Specify the number of cores in this CPU cluster. Used for the L2CTLR
* register.
*/
@@ -1062,6 +1065,7 @@ struct ArchCPU {
bool prop_pauth;
bool prop_pauth_impdef;
bool prop_pauth_qarma3;
bool prop_pauth_qarma5;
bool prop_lpa2;
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */

target/arm/cpu64.c

@@ -520,25 +520,40 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
}
if (cpu->prop_pauth) {
if (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) {
if ((cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) ||
(cpu->prop_pauth_impdef && cpu->prop_pauth_qarma5) ||
(cpu->prop_pauth_qarma3 && cpu->prop_pauth_qarma5)) {
error_setg(errp,
"cannot enable both pauth-impdef and pauth-qarma3");
"cannot enable pauth-impdef, pauth-qarma3 and "
"pauth-qarma5 at the same time");
return;
}
if (cpu->prop_pauth_impdef) {
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
bool use_default = !cpu->prop_pauth_qarma5 &&
!cpu->prop_pauth_qarma3 &&
!cpu->prop_pauth_impdef;
if (cpu->prop_pauth_qarma5 ||
(use_default &&
cpu->backcompat_pauth_default_use_qarma5)) {
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
} else if (cpu->prop_pauth_qarma3) {
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, features);
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 1);
} else if (cpu->prop_pauth_impdef ||
(use_default &&
!cpu->backcompat_pauth_default_use_qarma5)) {
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
} else {
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
g_assert_not_reached();
}
} else if (cpu->prop_pauth_impdef || cpu->prop_pauth_qarma3) {
error_setg(errp, "cannot enable pauth-impdef or "
"pauth-qarma3 without pauth");
} else if (cpu->prop_pauth_impdef ||
cpu->prop_pauth_qarma3 ||
cpu->prop_pauth_qarma5) {
error_setg(errp, "cannot enable pauth-impdef, pauth-qarma3 or "
"pauth-qarma5 without pauth");
error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
}
}
@@ -553,6 +568,8 @@ static const Property arm_cpu_pauth_impdef_property =
DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
static const Property arm_cpu_pauth_qarma3_property =
DEFINE_PROP_BOOL("pauth-qarma3", ARMCPU, prop_pauth_qarma3, false);
static Property arm_cpu_pauth_qarma5_property =
DEFINE_PROP_BOOL("pauth-qarma5", ARMCPU, prop_pauth_qarma5, false);
void aarch64_add_pauth_properties(Object *obj)
{
@@ -573,6 +590,7 @@ void aarch64_add_pauth_properties(Object *obj)
} else {
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma3_property);
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma5_property);
}
}
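
The precedence in arm_cpu_pauth_finalize() is easier to see reduced to a pure function. This is a hedged model mirroring the patch, not QEMU code; all names are local to the sketch:

#include <stdbool.h>
#include <stdio.h>

static const char *pauth_algo(bool impdef, bool qarma3, bool qarma5,
                              bool backcompat_qarma5)
{
    if ((impdef && qarma3) || (impdef && qarma5) || (qarma3 && qarma5)) {
        return "error: properties are mutually exclusive";
    }
    bool use_default = !impdef && !qarma3 && !qarma5;

    if (qarma5 || (use_default && backcompat_qarma5)) {
        return "QARMA5";
    } else if (qarma3) {
        return "QARMA3";
    }
    return "impdef"; /* explicit impdef, or the new default */
}

int main(void)
{
    printf("%s\n", pauth_algo(false, false, false, false)); /* impdef: new default */
    printf("%s\n", pauth_algo(false, false, false, true));  /* QARMA5: 9.2 machines */
    printf("%s\n", pauth_algo(false, true,  false, false)); /* QARMA3: explicit */
    return 0;
}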

target/arm/helper.c

@@ -17,11 +17,9 @@
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include <zlib.h> /* for crc32 */
#include "hw/irq.h"
#include "system/cpu-timers.h"
#include "system/kvm.h"
@@ -10984,289 +10982,6 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
};
}
/*
* Note that signed overflow is undefined in C. The following routines are
* careful to use unsigned types where modulo arithmetic is required.
* Failure to do so _will_ break on newer gcc.
*/
/* Signed saturating arithmetic. */
/* Perform 16-bit signed saturating addition. */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
uint16_t res;
res = a + b;
if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
if (a & 0x8000) {
res = 0x8000;
} else {
res = 0x7fff;
}
}
return res;
}
/* Perform 8-bit signed saturating addition. */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
uint8_t res;
res = a + b;
if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
if (a & 0x80) {
res = 0x80;
} else {
res = 0x7f;
}
}
return res;
}
/* Perform 16-bit signed saturating subtraction. */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
uint16_t res;
res = a - b;
if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
if (a & 0x8000) {
res = 0x8000;
} else {
res = 0x7fff;
}
}
return res;
}
/* Perform 8-bit signed saturating subtraction. */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
uint8_t res;
res = a - b;
if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
if (a & 0x80) {
res = 0x80;
} else {
res = 0x7f;
}
}
return res;
}
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q
#include "op_addsub.h"
/* Unsigned saturating arithmetic. */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
uint16_t res;
res = a + b;
if (res < a) {
res = 0xffff;
}
return res;
}
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
if (a > b) {
return a - b;
} else {
return 0;
}
}
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
uint8_t res;
res = a + b;
if (res < a) {
res = 0xff;
}
return res;
}
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
if (a > b) {
return a - b;
} else {
return 0;
}
}
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq
#include "op_addsub.h"
/* Signed modulo arithmetic. */
#define SARITH16(a, b, n, op) do { \
int32_t sum; \
sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
RESULT(sum, n, 16); \
if (sum >= 0) \
ge |= 3 << (n * 2); \
} while (0)
#define SARITH8(a, b, n, op) do { \
int32_t sum; \
sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
RESULT(sum, n, 8); \
if (sum >= 0) \
ge |= 1 << n; \
} while (0)
#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE
#include "op_addsub.h"
/* Unsigned modulo arithmetic. */
#define ADD16(a, b, n) do { \
uint32_t sum; \
sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
RESULT(sum, n, 16); \
if ((sum >> 16) == 1) \
ge |= 3 << (n * 2); \
} while (0)
#define ADD8(a, b, n) do { \
uint32_t sum; \
sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
RESULT(sum, n, 8); \
if ((sum >> 8) == 1) \
ge |= 1 << n; \
} while (0)
#define SUB16(a, b, n) do { \
uint32_t sum; \
sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
RESULT(sum, n, 16); \
if ((sum >> 16) == 0) \
ge |= 3 << (n * 2); \
} while (0)
#define SUB8(a, b, n) do { \
uint32_t sum; \
sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
RESULT(sum, n, 8); \
if ((sum >> 8) == 0) \
ge |= 1 << n; \
} while (0)
#define PFX u
#define ARITH_GE
#include "op_addsub.h"
/* Halved signed arithmetic. */
#define ADD16(a, b, n) \
RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh
#include "op_addsub.h"
/* Halved unsigned arithmetic. */
#define ADD16(a, b, n) \
RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh
#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
if (a > b) {
return a - b;
} else {
return b - a;
}
}
/* Unsigned sum of absolute byte differences. */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
uint32_t sum;
sum = do_usad(a, b);
sum += do_usad(a >> 8, b >> 8);
sum += do_usad(a >> 16, b >> 16);
sum += do_usad(a >> 24, b >> 24);
return sum;
}
/* For ARMv6 SEL instruction. */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
uint32_t mask;
mask = 0;
if (flags & 1) {
mask |= 0xff;
}
if (flags & 2) {
mask |= 0xff00;
}
if (flags & 4) {
mask |= 0xff0000;
}
if (flags & 8) {
mask |= 0xff000000;
}
return (a & mask) | (b & ~mask);
}
/*
* CRC helpers.
* The upper bytes of val (above the number specified by 'bytes') must have
* been zeroed out by the caller.
*/
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
uint8_t buf[4];
stl_le_p(buf, val);
/* zlib crc32 converts the accumulator and output to one's complement. */
return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}
uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
uint8_t buf[4];
stl_le_p(buf, val);
/* Linux crc32c converts the output to one's complement. */
return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
/*
* Return the exception level to which FP-disabled exceptions should

target/arm/tcg/arith_helper.c (new file)

@@ -0,0 +1,296 @@
/*
* ARM generic helpers for various arithmetical operations.
*
* This code is licensed under the GNU GPL v2 or later.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/crc32c.h"
#include <zlib.h> /* for crc32 */
/*
* Note that signed overflow is undefined in C. The following routines are
* careful to use unsigned types where modulo arithmetic is required.
* Failure to do so _will_ break on newer gcc.
*/
/* Signed saturating arithmetic. */
/* Perform 16-bit signed saturating addition. */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
uint16_t res;
res = a + b;
if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
if (a & 0x8000) {
res = 0x8000;
} else {
res = 0x7fff;
}
}
return res;
}
/* Perform 8-bit signed saturating addition. */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
uint8_t res;
res = a + b;
if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
if (a & 0x80) {
res = 0x80;
} else {
res = 0x7f;
}
}
return res;
}
/* Perform 16-bit signed saturating subtraction. */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
uint16_t res;
res = a - b;
if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
if (a & 0x8000) {
res = 0x8000;
} else {
res = 0x7fff;
}
}
return res;
}
/* Perform 8-bit signed saturating subtraction. */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
uint8_t res;
res = a - b;
if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
if (a & 0x80) {
res = 0x80;
} else {
res = 0x7f;
}
}
return res;
}
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q
#include "op_addsub.c.inc"
/* Unsigned saturating arithmetic. */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
uint16_t res;
res = a + b;
if (res < a) {
res = 0xffff;
}
return res;
}
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
if (a > b) {
return a - b;
} else {
return 0;
}
}
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
uint8_t res;
res = a + b;
if (res < a) {
res = 0xff;
}
return res;
}
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
if (a > b) {
return a - b;
} else {
return 0;
}
}
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq
#include "op_addsub.c.inc"
/* Signed modulo arithmetic. */
#define SARITH16(a, b, n, op) do { \
int32_t sum; \
sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
RESULT(sum, n, 16); \
if (sum >= 0) \
ge |= 3 << (n * 2); \
} while (0)
#define SARITH8(a, b, n, op) do { \
int32_t sum; \
sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
RESULT(sum, n, 8); \
if (sum >= 0) \
ge |= 1 << n; \
} while (0)
#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE
#include "op_addsub.c.inc"
/* Unsigned modulo arithmetic. */
#define ADD16(a, b, n) do { \
uint32_t sum; \
sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
RESULT(sum, n, 16); \
if ((sum >> 16) == 1) \
ge |= 3 << (n * 2); \
} while (0)
#define ADD8(a, b, n) do { \
uint32_t sum; \
sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
RESULT(sum, n, 8); \
if ((sum >> 8) == 1) \
ge |= 1 << n; \
} while (0)
#define SUB16(a, b, n) do { \
uint32_t sum; \
sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
RESULT(sum, n, 16); \
if ((sum >> 16) == 0) \
ge |= 3 << (n * 2); \
} while (0)
#define SUB8(a, b, n) do { \
uint32_t sum; \
sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
RESULT(sum, n, 8); \
if ((sum >> 8) == 0) \
ge |= 1 << n; \
} while (0)
#define PFX u
#define ARITH_GE
#include "op_addsub.c.inc"
/* Halved signed arithmetic. */
#define ADD16(a, b, n) \
RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh
#include "op_addsub.c.inc"
/* Halved unsigned arithmetic. */
#define ADD16(a, b, n) \
RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh
#include "op_addsub.c.inc"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
if (a > b) {
return a - b;
} else {
return b - a;
}
}
/* Unsigned sum of absolute byte differences. */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
uint32_t sum;
sum = do_usad(a, b);
sum += do_usad(a >> 8, b >> 8);
sum += do_usad(a >> 16, b >> 16);
sum += do_usad(a >> 24, b >> 24);
return sum;
}
/* For ARMv6 SEL instruction. */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
uint32_t mask;
mask = 0;
if (flags & 1) {
mask |= 0xff;
}
if (flags & 2) {
mask |= 0xff00;
}
if (flags & 4) {
mask |= 0xff0000;
}
if (flags & 8) {
mask |= 0xff000000;
}
return (a & mask) | (b & ~mask);
}
/*
* CRC helpers.
* The upper bytes of val (above the number specified by 'bytes') must have
* been zeroed out by the caller.
*/
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
uint8_t buf[4];
stl_le_p(buf, val);
/* zlib crc32 converts the accumulator and output to one's complement. */
return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}
uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
uint8_t buf[4];
stl_le_p(buf, val);
/* Linux crc32c converts the output to one's complement. */
return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
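
To make the saturation rule concrete: the overflow test fires when both operands share a sign bit but the result's sign differs, and the clamp picks the boundary matching the operands' sign. A hedged standalone check of the 8-bit case (the add8_sat logic is copied from above so it compiles on its own):

#include <stdint.h>
#include <assert.h>

static uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res = a + b;

    /* Same sign in, different sign out: signed overflow, so clamp. */
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        res = (a & 0x80) ? 0x80 : 0x7f;
    }
    return res;
}

int main(void)
{
    assert(add8_sat(0x7f, 0x01) == 0x7f); /*  127 + 1  clamps to +127 */
    assert(add8_sat(0x80, 0xff) == 0x80); /* -128 + -1 clamps to -128 */
    assert(add8_sat(0x05, 0x03) == 0x08); /* in-range sums pass through */
    return 0;
}

The 16-bit variants follow the same pattern with 0x8000/0x7fff, and usad8 is plain arithmetic: usad8(0x01020304, 0x04030201) sums |1-4| + |2-3| + |3-2| + |4-1| = 8.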

target/arm/tcg/meson.build

@@ -40,6 +40,7 @@ arm_ss.add(files(
'tlb_helper.c',
'vec_helper.c',
'tlb-insns.c',
'arith_helper.c',
))
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(

tests/qtest/arm-cpu-features.c

@@ -419,21 +419,28 @@ static void pauth_tests_default(QTestState *qts, const char *cpu_type)
assert_has_feature_enabled(qts, cpu_type, "pauth");
assert_has_feature_disabled(qts, cpu_type, "pauth-impdef");
assert_has_feature_disabled(qts, cpu_type, "pauth-qarma3");
assert_has_feature_disabled(qts, cpu_type, "pauth-qarma5");
assert_set_feature(qts, cpu_type, "pauth", false);
assert_set_feature(qts, cpu_type, "pauth", true);
assert_set_feature(qts, cpu_type, "pauth-impdef", true);
assert_set_feature(qts, cpu_type, "pauth-impdef", false);
assert_set_feature(qts, cpu_type, "pauth-qarma3", true);
assert_set_feature(qts, cpu_type, "pauth-qarma3", false);
assert_set_feature(qts, cpu_type, "pauth-qarma5", true);
assert_set_feature(qts, cpu_type, "pauth-qarma5", false);
assert_error(qts, cpu_type,
"cannot enable pauth-impdef or pauth-qarma3 without pauth",
"cannot enable pauth-impdef, pauth-qarma3 or pauth-qarma5 without pauth",
"{ 'pauth': false, 'pauth-impdef': true }");
assert_error(qts, cpu_type,
"cannot enable pauth-impdef or pauth-qarma3 without pauth",
"cannot enable pauth-impdef, pauth-qarma3 or pauth-qarma5 without pauth",
"{ 'pauth': false, 'pauth-qarma3': true }");
assert_error(qts, cpu_type,
"cannot enable both pauth-impdef and pauth-qarma3",
"{ 'pauth': true, 'pauth-impdef': true, 'pauth-qarma3': true }");
"cannot enable pauth-impdef, pauth-qarma3 or pauth-qarma5 without pauth",
"{ 'pauth': false, 'pauth-qarma5': true }");
assert_error(qts, cpu_type,
"cannot enable pauth-impdef, pauth-qarma3 and pauth-qarma5 at the same time",
"{ 'pauth': true, 'pauth-impdef': true, 'pauth-qarma3': true,"
" 'pauth-qarma5': true }");
}
static void test_query_cpu_model_expansion(const void *data)
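
pauth_tests_default() drives these assertions through query-cpu-model-expansion over QMP. A hedged sketch of the underlying wire request (the property names come from this series; the framing is the standard shape of that command):

{ "execute": "query-cpu-model-expansion",
  "arguments": { "type": "full",
                 "model": { "name": "max",
                            "props": { "pauth": true, "pauth-qarma5": true } } } }

A successful reply echoes an expanded model whose props show pauth-qarma5 enabled and the other algorithm properties disabled.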

tests/qtest/boot-serial-test.c

@@ -70,18 +70,23 @@ static const uint8_t kernel_plml605[] = {
};
static const uint8_t bios_raspi2[] = {
0x08, 0x30, 0x9f, 0xe5, /* ldr r3,[pc,#8] Get base */
0x54, 0x20, 0xa0, 0xe3, /* mov r2,#'T' */
0x00, 0x20, 0xc3, 0xe5, /* strb r2,[r3] */
0xfb, 0xff, 0xff, 0xea, /* b loop */
0x00, 0x10, 0x20, 0x3f, /* 0x3f201000 = UART0 base addr */
0x10, 0x30, 0x9f, 0xe5, /* ldr r3, [pc, #16] Get &UART0 */
0x10, 0x20, 0x9f, 0xe5, /* ldr r2, [pc, #16] Get &CR */
0xb0, 0x23, 0xc3, 0xe1, /* strh r2, [r3, #48] Set CR */
0x54, 0x20, 0xa0, 0xe3, /* mov r2, #'T' */
0x00, 0x20, 0xc3, 0xe5, /* loop: strb r2, [r3] *TXDAT = 'T' */
0xff, 0xff, 0xff, 0xea, /* b -4 (loop) */
0x00, 0x10, 0x20, 0x3f, /* UART0: 0x3f201000 */
0x01, 0x01, 0x00, 0x00, /* CR: 0x101 = UARTEN|TXE */
};
static const uint8_t kernel_aarch64[] = {
0x81, 0x0a, 0x80, 0x52, /* mov w1, #0x54 */
0x02, 0x20, 0xa1, 0xd2, /* mov x2, #0x9000000 */
0x41, 0x00, 0x00, 0x39, /* strb w1, [x2] */
0xfd, 0xff, 0xff, 0x17, /* b -12 (loop) */
0x02, 0x20, 0xa1, 0xd2, /* mov x2, #0x9000000 Load UART0 */
0x21, 0x20, 0x80, 0x52, /* mov w1, 0x101 CR = UARTEN|TXE */
0x41, 0x60, 0x00, 0x79, /* strh w1, [x2, #48] Set CR */
0x81, 0x0a, 0x80, 0x52, /* mov w1, #'T' */
0x41, 0x00, 0x00, 0x39, /* loop: strb w1, [x2] *TXDAT = 'T' */
0xff, 0xff, 0xff, 0x17, /* b -4 (loop) */
};
static const uint8_t kernel_nrf51[] = {
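
Both blobs now program the PL011 before touching the data register: the control register sits at offset 0x30, and 0x101 sets UARTEN (bit 0) plus TXE (bit 8), exactly what the inline comments annotate. A hedged C rendering of the same bring-up (freestanding sketch using the raspi2 base address from the test):

#include <stdint.h>

#define UART0_BASE 0x3f201000u /* PL011 base on raspi2, per the test */
#define UARTDR     0x00u       /* data register */
#define UARTCR     0x30u       /* control register */
#define CR_UARTEN  (1u << 0)
#define CR_TXE     (1u << 8)

static inline void mmio_write16(uintptr_t addr, uint16_t val)
{
    *(volatile uint16_t *)addr = val;
}

static inline void mmio_write8(uintptr_t addr, uint8_t val)
{
    *(volatile uint8_t *)addr = val;
}

void serial_hello(void)
{
    /* Enable the UART and its transmitter, then emit 'T' forever,
     * mirroring the assembly blob above. */
    mmio_write16(UART0_BASE + UARTCR, CR_UARTEN | CR_TXE); /* 0x101 */
    for (;;) {
        mmio_write8(UART0_BASE + UARTDR, 'T');
    }
}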

tests/tcg/aarch64/Makefile.target

@@ -91,6 +91,9 @@ EXTRA_RUNS+=run-memory-replay
ifneq ($(CROSS_CC_HAS_ARMV8_3),)
pauth-3: CFLAGS += $(CROSS_CC_HAS_ARMV8_3)
# This test explicitly checks the output of the pauth operation so we
# must force the use of the QARMA5 algorithm for it.
run-pauth-3: QEMU_BASE_MACHINE=-M virt -cpu max,pauth-qarma5=on -display none
else
pauth-3:
$(call skip-test, "BUILD of $@", "missing compiler support")