lib: Updated existing cmsis-core for stm32h7 support

Signed-off-by: Konstantin Vogel <konstantin.vogel@gmx.net>
Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
D4SK 2021-02-10 15:05:18 +00:00 committed by Kevin O'Connor
parent c7b65f50e3
commit 309fbbc104
11 changed files with 3368 additions and 262 deletions


@@ -1,11 +1,11 @@
/**************************************************************************//**
* @file cmsis_gcc.h
* @brief CMSIS compiler GCC header file
* @version V5.0.3
* @date 16. January 2018
* @version V5.3.0
* @date 26. March 2020
******************************************************************************/
/*
* Copyright (c) 2009-2017 ARM Limited. All rights reserved.
* Copyright (c) 2009-2020 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -113,7 +113,74 @@
#ifndef __RESTRICT
#define __RESTRICT __restrict
#endif
#ifndef __COMPILER_BARRIER
#define __COMPILER_BARRIER() __ASM volatile("":::"memory")
#endif
/* ######################### Startup and Lowlevel Init ######################## */
#ifndef __PROGRAM_START
/**
\brief Initializes data and bss sections
\details This default implementation initializes all data and additional bss
sections, relying on .copy.table and .zero.table being specified properly
in the linker script being used.
*/
__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void)
{
extern void _start(void) __NO_RETURN;
typedef struct {
uint32_t const* src;
uint32_t* dest;
uint32_t wlen;
} __copy_table_t;
typedef struct {
uint32_t* dest;
uint32_t wlen;
} __zero_table_t;
extern const __copy_table_t __copy_table_start__;
extern const __copy_table_t __copy_table_end__;
extern const __zero_table_t __zero_table_start__;
extern const __zero_table_t __zero_table_end__;
for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) {
for(uint32_t i=0u; i<pTable->wlen; ++i) {
pTable->dest[i] = pTable->src[i];
}
}
for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) {
for(uint32_t i=0u; i<pTable->wlen; ++i) {
pTable->dest[i] = 0u;
}
}
_start();
}
#define __PROGRAM_START __cmsis_start
#endif
#ifndef __INITIAL_SP
#define __INITIAL_SP __StackTop
#endif
#ifndef __STACK_LIMIT
#define __STACK_LIMIT __StackLimit
#endif
#ifndef __VECTOR_TABLE
#define __VECTOR_TABLE __Vectors
#endif
#ifndef __VECTOR_TABLE_ATTRIBUTE
#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors")))
#endif
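For orientation, here is a hedged sketch of how a CMSIS-style startup file typically consumes these new hooks; the __StackTop, SystemInit, and Reset_Handler names follow common CMSIS templates and are assumptions, not part of this diff:
typedef void (*VECTOR_TABLE_Type)(void);
extern uint32_t __StackTop;            /* top of stack, provided by the linker script */
extern void SystemInit(void);          /* clock/FPU setup from the device support code */
void Reset_Handler(void);
/* Minimal vector table: __VECTOR_TABLE expands to __Vectors, and the
   attribute macro places it in the ".vectors" section. */
const VECTOR_TABLE_Type __VECTOR_TABLE[] __VECTOR_TABLE_ATTRIBUTE = {
    (VECTOR_TABLE_Type)&__StackTop,    /* initial main stack pointer (__INITIAL_SP) */
    Reset_Handler,                     /* reset entry point */
};
void Reset_Handler(void)
{
    SystemInit();
    __PROGRAM_START();                 /* __cmsis_start(): copy .data, zero .bss, call _start() */
}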
/* ########################### Core Function Access ########################### */
/** \ingroup CMSIS_Core_FunctionInterface
@@ -246,7 +313,7 @@ __STATIC_FORCEINLINE uint32_t __get_xPSR(void)
*/
__STATIC_FORCEINLINE uint32_t __get_PSP(void)
{
register uint32_t result;
uint32_t result;
__ASM volatile ("MRS %0, psp" : "=r" (result) );
return(result);
@@ -261,7 +328,7 @@ __STATIC_FORCEINLINE uint32_t __get_PSP(void)
*/
__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void)
{
register uint32_t result;
uint32_t result;
__ASM volatile ("MRS %0, psp_ns" : "=r" (result) );
return(result);
@@ -300,7 +367,7 @@ __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack)
*/
__STATIC_FORCEINLINE uint32_t __get_MSP(void)
{
register uint32_t result;
uint32_t result;
__ASM volatile ("MRS %0, msp" : "=r" (result) );
return(result);
@@ -315,7 +382,7 @@ __STATIC_FORCEINLINE uint32_t __get_MSP(void)
*/
__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void)
{
register uint32_t result;
uint32_t result;
__ASM volatile ("MRS %0, msp_ns" : "=r" (result) );
return(result);
@@ -355,7 +422,7 @@ __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack)
*/
__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void)
{
register uint32_t result;
uint32_t result;
__ASM volatile ("MRS %0, sp_ns" : "=r" (result) );
return(result);
@@ -383,7 +450,7 @@ __STATIC_FORCEINLINE uint32_t __get_PRIMASK(void)
{
uint32_t result;
__ASM volatile ("MRS %0, primask" : "=r" (result) :: "memory");
__ASM volatile ("MRS %0, primask" : "=r" (result) );
return(result);
}
@@ -398,7 +465,7 @@ __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void)
{
uint32_t result;
__ASM volatile ("MRS %0, primask_ns" : "=r" (result) :: "memory");
__ASM volatile ("MRS %0, primask_ns" : "=r" (result) );
return(result);
}
#endif
@@ -596,7 +663,7 @@ __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void)
// without main extensions, the non-secure PSPLIM is RAZ/WI
return 0U;
#else
register uint32_t result;
uint32_t result;
__ASM volatile ("MRS %0, psplim" : "=r" (result) );
return result;
#endif
@@ -617,7 +684,7 @@ __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void)
// without main extensions, the non-secure PSPLIM is RAZ/WI
return 0U;
#else
register uint32_t result;
uint32_t result;
__ASM volatile ("MRS %0, psplim_ns" : "=r" (result) );
return result;
#endif
@@ -683,7 +750,7 @@ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void)
// without main extensions, the non-secure MSPLIM is RAZ/WI
return 0U;
#else
register uint32_t result;
uint32_t result;
__ASM volatile ("MRS %0, msplim" : "=r" (result) );
return result;
#endif
@@ -705,7 +772,7 @@ __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void)
// without main extensions, the non-secure MSPLIM is RAZ/WI
return 0U;
#else
register uint32_t result;
uint32_t result;
__ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
return result;
#endif
@@ -758,9 +825,6 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
(defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */
#if ((defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
@@ -770,7 +834,9 @@ __STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
#if __has_builtin(__builtin_arm_get_fpscr) || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
#if __has_builtin(__builtin_arm_get_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
/* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
return __builtin_arm_get_fpscr();
#else
@@ -794,7 +860,9 @@ __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
#if __has_builtin(__builtin_arm_set_fpscr) || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
#if __has_builtin(__builtin_arm_set_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
/* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
__builtin_arm_set_fpscr(fpscr);
#else
@@ -805,10 +873,6 @@ __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
#endif
}
#endif /* ((defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */
/*@} end of CMSIS_Core_RegAccFunctions */
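As a usage illustration (a minimal sketch, not from this diff; the RMode field at FPSCR[23:22] is per the ARMv7-M architecture, and the constant names are local assumptions):
#define FPSCR_RMODE_MASK (3U << 22)    /* rounding-mode field, FPSCR[23:22] */
#define FPSCR_RMODE_RZ   (3U << 22)    /* 0b11 = round towards zero */
static inline void fpu_set_round_to_zero(void)
{
    uint32_t fpscr = __get_FPSCR();
    fpscr = (fpscr & ~FPSCR_RMODE_MASK) | FPSCR_RMODE_RZ;
    __set_FPSCR(fpscr);
}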
@@ -842,7 +906,7 @@ __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
\brief Wait For Interrupt
\details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
*/
#define __WFI() __ASM volatile ("wfi")
#define __WFI() __ASM volatile ("wfi":::"memory")
/**
@@ -850,7 +914,7 @@ __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
\details Wait For Event is a hint instruction that permits the processor to enter
a low-power state until one of a number of events occurs.
*/
#define __WFE() __ASM volatile ("wfe")
#define __WFE() __ASM volatile ("wfe":::"memory")
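The newly added "memory" clobbers are what make loops like the following safe; a hedged sketch, where wake_flag is a hypothetical variable written from an interrupt handler:
static uint32_t wake_flag;             /* set from an interrupt handler */
static void wait_for_wake(void)
{
    while (!wake_flag) {
        __WFI();                       /* the "memory" clobber forces wake_flag to be
                                          re-read after each wakeup instead of being
                                          cached in a register across the sleep */
    }
}
In practice such a flag would usually also be declared volatile; the clobber additionally stops the compiler from moving other memory accesses across the sleep point.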
/**
@@ -907,7 +971,7 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
#else
uint32_t result;
__ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
__ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return result;
#endif
}
@@ -923,7 +987,7 @@ __STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
uint32_t result;
__ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
__ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return result;
}
@@ -941,7 +1005,7 @@ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
#else
int16_t result;
__ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
__ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return result;
#endif
}
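A quick reference for the three byte-reversal intrinsics; the values in the comments follow directly from the instruction definitions (the function itself is an illustrative sketch):
static void rev_examples(void)
{
    uint32_t a = __REV(0x12345678U);       /* 0x78563412: reverse all four bytes */
    uint32_t b = __REV16(0x12345678U);     /* 0x34127856: reverse bytes within each halfword */
    int16_t  c = __REVSH((int16_t)0x3412); /* 0x1234: reverse bytes in a signed halfword */
    (void)a; (void)b; (void)c;
}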
@@ -988,7 +1052,7 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
__ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
__ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */
@@ -1011,7 +1075,23 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
#define __CLZ (uint8_t)__builtin_clz
__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
/* Even though __builtin_clz produces a CLZ instruction on ARM, formally
__builtin_clz(0) is undefined behaviour, so handle this case specially.
This guarantees ARM-compatible results if this code happens to be compiled
for a non-ARM target, and ensures the compiler doesn't decide to activate
any optimisations using the logic "value was passed to __builtin_clz, so it
is non-zero".
ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
single CLZ instruction.
*/
if (value == 0U)
{
return 32U;
}
return __builtin_clz(value);
}
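Since __CLZ(0) is now well defined (it returns 32), helpers built on top of it no longer hit undefined behaviour; a hedged sketch (highest_set_bit is a hypothetical helper, not CMSIS API):
static uint32_t highest_set_bit(uint32_t x)
{
    /* __CLZ(1) == 31, __CLZ(0x80000000) == 0, and __CLZ(0) == 32 by definition,
       so the subtraction below is only meaningful for non-zero x. */
    return 31U - __CLZ(x);
}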
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
@@ -1153,11 +1233,11 @@ __STATIC_FORCEINLINE void __CLREX(void)
\param [in] ARG2 Bit position to saturate to (1..32)
\return Saturated value
*/
#define __SSAT(ARG1,ARG2) \
#define __SSAT(ARG1, ARG2) \
__extension__ \
({ \
int32_t __RES, __ARG1 = (ARG1); \
__ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
__ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
__RES; \
})
@@ -1169,11 +1249,11 @@ __extension__ \
\param [in] ARG2 Bit position to saturate to (0..31)
\return Saturated value
*/
#define __USAT(ARG1,ARG2) \
#define __USAT(ARG1, ARG2) \
__extension__ \
({ \
uint32_t __RES, __ARG1 = (ARG1); \
__ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
__ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
__RES; \
})
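Because ARG2 is passed through the "I" constraint, the saturation width must be a compile-time constant; a hedged usage sketch:
static int16_t clamp_to_q15(int32_t x)
{
    return (int16_t)__SSAT(x, 16);     /* clamp to [-32768, 32767] in one instruction */
}
static uint8_t clamp_to_u8(int32_t x)
{
    return (uint8_t)__USAT(x, 8);      /* clamp to [0, 255] */
}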
@@ -1358,7 +1438,7 @@ __STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr)
{
uint32_t result;
__ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) );
__ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
return ((uint8_t) result);
}
@@ -1373,7 +1453,7 @@ __STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr)
{
uint32_t result;
__ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) );
__ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
return ((uint16_t) result);
}
@@ -1388,7 +1468,7 @@ __STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr)
{
uint32_t result;
__ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) );
__ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
return(result);
}
@@ -1401,7 +1481,7 @@ __STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr)
*/
__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
{
__ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
__ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}
@@ -1413,7 +1493,7 @@ __STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
*/
__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
{
__ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
__ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}
@@ -1425,7 +1505,7 @@ __STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
*/
__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
{
__ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
__ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}
@@ -1439,7 +1519,7 @@ __STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr)
{
uint32_t result;
__ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) );
__ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
return ((uint8_t) result);
}
@@ -1454,7 +1534,7 @@ __STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr)
{
uint32_t result;
__ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) );
__ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
return ((uint16_t) result);
}
@@ -1469,7 +1549,7 @@ __STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr)
{
uint32_t result;
__ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) );
__ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
return(result);
}
@@ -1486,7 +1566,7 @@ __STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
{
uint32_t result;
__ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) );
__ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
return(result);
}
@@ -1503,7 +1583,7 @@ __STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
{
uint32_t result;
__ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) );
__ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
return(result);
}
@@ -1520,7 +1600,7 @@ __STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
{
uint32_t result;
__ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) );
__ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
return(result);
}
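The exclusive acquire/release pairs combine into lock-free read-modify-write loops on cores that provide them; a hedged sketch of an atomic increment (the helper name is illustrative):
static uint32_t atomic_increment(volatile uint32_t *counter)
{
    uint32_t value;
    do {
        value = __LDAEX(counter) + 1U;        /* load-acquire, sets the exclusive monitor */
    } while (__STLEX(value, counter) != 0U);  /* store-release; non-zero means the monitor
                                                 was lost, so retry */
    return value;
}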
@@ -1550,7 +1630,7 @@ __STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1558,7 +1638,7 @@ __STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1574,7 +1654,7 @@ __STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1582,7 +1662,7 @@ __STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
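These intrinsics operate on four packed byte lanes per 32-bit word; a hedged sketch of saturating pixel addition (the names are illustrative):
static uint32_t add_pixels_saturating(uint32_t four_px_a, uint32_t four_px_b)
{
    /* __UQADD8 adds each byte lane with unsigned saturation, so
       0xF0 + 0x20 clamps to 0xFF instead of wrapping to 0x10. */
    return __UQADD8(four_px_a, four_px_b);
}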
@@ -1599,7 +1679,7 @@ __STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1607,7 +1687,7 @@ __STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1623,7 +1703,7 @@ __STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1631,7 +1711,7 @@ __STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1648,7 +1728,7 @@ __STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1656,7 +1736,7 @@ __STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1672,7 +1752,7 @@ __STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1680,7 +1760,7 @@ __STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1696,7 +1776,7 @@ __STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1704,7 +1784,7 @@ __STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1720,7 +1800,7 @@ __STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1728,7 +1808,7 @@ __STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1744,7 +1824,7 @@ __STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1752,7 +1832,7 @@ __STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1768,7 +1848,7 @@ __STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1776,7 +1856,7 @@ __STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1792,7 +1872,7 @@ __STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1800,7 +1880,7 @@ __STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1816,7 +1896,7 @@ __STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1824,7 +1904,7 @@ __STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1832,7 +1912,7 @@ __STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1840,21 +1920,21 @@ __STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
uint32_t result;
__ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
__ASM ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
return(result);
}
#define __SSAT16(ARG1,ARG2) \
#define __SSAT16(ARG1, ARG2) \
({ \
int32_t __RES, __ARG1 = (ARG1); \
__ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
__ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
__RES; \
})
#define __USAT16(ARG1,ARG2) \
#define __USAT16(ARG1, ARG2) \
({ \
uint32_t __RES, __ARG1 = (ARG1); \
__ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
__ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
__RES; \
})
@@ -1862,7 +1942,7 @@ __STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1)
{
uint32_t result;
__ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
__ASM ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
return(result);
}
@@ -1870,7 +1950,7 @@ __STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -1878,15 +1958,24 @@ __STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
{
uint32_t result;
__ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
__ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
return(result);
}
__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate)
{
uint32_t result;
__ASM ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) );
return result;
}
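A hedged usage note for the new __SXTB16_RORn: because rotate is bound to the "i" constraint, calls only compile with a compile-time constant rotation, and the SXTB16 encoding restricts it to 0, 8, 16, or 24. An illustrative sketch (the helper name is hypothetical):
static uint32_t sign_extend_odd_bytes(uint32_t packed)
{
    /* Rotate by 8 so bytes 1 and 3 land in the lanes that SXTB16 sign-extends. */
    return __SXTB16_RORn(packed, 8);
}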
__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
__ASM ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
@@ -2075,7 +2164,7 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
int32_t result;
__ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
__ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
return(result);
}