From 13e9f0ccdca53c6ae1c5bc901303fc72a380c8f4 Mon Sep 17 00:00:00 2001 From: Markus Klein Date: Sat, 27 Jan 2024 23:11:47 +0100 Subject: [PATCH] Core(A): align compiler header files to core(M) implementation --- .../Core/Include/a-profile/cmsis_armclang_a.h | 312 ++++++++++-------- CMSIS/Core/Include/a-profile/cmsis_clang_a.h | 225 ++++--------- CMSIS/Core/Include/a-profile/cmsis_gcc_a.h | 233 ++++++------- .../Core/Include/m-profile/cmsis_armclang_m.h | 6 +- CMSIS/Core/Include/m-profile/cmsis_gcc_m.h | 1 + 5 files changed, 353 insertions(+), 424 deletions(-) diff --git a/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h b/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h index d301a35f3..2859d174c 100644 --- a/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h +++ b/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h @@ -30,23 +30,17 @@ #define __ASM __asm #endif #ifndef __INLINE - #define __INLINE __inline -#endif -#ifndef __FORCEINLINE - #define __FORCEINLINE __attribute__((always_inline)) + #define __INLINE inline #endif #ifndef __STATIC_INLINE - #define __STATIC_INLINE static __inline + #define __STATIC_INLINE static inline #endif #ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline #endif #ifndef __NO_RETURN #define __NO_RETURN __attribute__((__noreturn__)) #endif -#ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) -#endif #ifndef __USED #define __USED __attribute__((used)) #endif @@ -59,10 +53,12 @@ #ifndef __PACKED_STRUCT #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) #endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif #ifndef __UNALIGNED_UINT16_WRITE #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; #pragma clang diagnostic pop #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) @@ -70,7 +66,6 @@ #ifndef __UNALIGNED_UINT16_READ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; #pragma clang diagnostic pop #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) @@ -78,7 +73,6 @@ #ifndef __UNALIGNED_UINT32_WRITE #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; #pragma clang diagnostic pop #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) @@ -93,8 +87,8 @@ #ifndef __ALIGNED #define __ALIGNED(x) __attribute__((aligned(x))) #endif -#ifndef __PACKED - #define __PACKED __attribute__((packed)) +#ifndef __RESTRICT + #define __RESTRICT __restrict #endif #ifndef __COMPILER_BARRIER #define __COMPILER_BARRIER() __ASM volatile("":::"memory") @@ -106,14 +100,14 @@ \brief No Operation \details No Operation does nothing. This instruction can be used for code alignment purposes. 
*/ -#define __NOP __builtin_arm_nop +#define __NOP __builtin_arm_nop /** \brief Wait For Interrupt \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -#define __WFI __builtin_arm_wfi +#define __WFI __builtin_arm_wfi /** @@ -121,14 +115,14 @@ \details Wait For Event is a hint instruction that permits the processor to enter a low-power state until one of a number of events occurs. */ -#define __WFE __builtin_arm_wfe +#define __WFE __builtin_arm_wfe /** \brief Send Event \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -#define __SEV __builtin_arm_sev +#define __SEV __builtin_arm_sev /** @@ -137,14 +131,15 @@ so that all instructions following the ISB are fetched from cache or memory, after the instruction has been completed. */ -#define __ISB() __builtin_arm_isb(0xF) +#define __ISB() __builtin_arm_isb(0xF) + /** \brief Data Synchronization Barrier \details Acts as a special kind of Data Memory Barrier. It completes when all explicit memory accesses before this instruction complete. */ -#define __DSB() __builtin_arm_dsb(0xF) +#define __DSB() __builtin_arm_dsb(0xF) /** @@ -152,7 +147,7 @@ \details Ensures the apparent order of the explicit memory operations before and after the instruction, without ensuring their completion. */ -#define __DMB() __builtin_arm_dmb(0xF) +#define __DMB() __builtin_arm_dmb(0xF) /** @@ -161,7 +156,7 @@ \param [in] value Value to reverse \return Reversed value */ -#define __REV(value) __builtin_bswap32(value) +#define __REV(value) __builtin_bswap32(value) /** @@ -170,7 +165,7 @@ \param [in] value Value to reverse \return Reversed value */ -#define __REV16(value) __ROR(__REV(value), 16) +#define __REV16(value) __ROR(__REV(value), 16) /** @@ -244,6 +239,33 @@ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) } +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __builtin_arm_ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __builtin_arm_usat + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + + /** \brief LDR Exclusive (8 bit) \details Executes a exclusive LDR instruction for 8 bit value. @@ -304,31 +326,6 @@ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) #define __STREXW (uint32_t)__builtin_arm_strex -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -#define __CLREX __builtin_arm_clrex - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT __builtin_arm_ssat - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT __builtin_arm_usat - /** \brief Rotate Right with Extend (32 bit) \details Moves each bit of a bitstring right by one bit. 
@@ -425,7 +422,93 @@ __STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
 __ASM volatile ("strt %1, %0, #0" : "=Q" (*ptr) : "r" (value) );
 }
 
+/* ########################### Core Function Access ########################### */
+/** \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
+ @{
+ */
+
+/**
+ \brief Enable IRQ Interrupts
+ \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __enable_irq(void)
+{
+ __ASM volatile ("cpsie i" : : : "memory");
+}
+
+
+/**
+ \brief Disable IRQ Interrupts
+ \details Disables IRQ interrupts by setting the I-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __disable_irq(void)
+{
+ __ASM volatile ("cpsid i" : : : "memory");
+}
+
+/**
+ \brief Enable FIQ
+ \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __enable_fault_irq(void)
+{
+ __ASM volatile ("cpsie f" : : : "memory");
+}
+
+
+/**
+ \brief Disable FIQ
+ \details Disables FIQ interrupts by setting the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __disable_fault_irq(void)
+{
+ __ASM volatile ("cpsid f" : : : "memory");
+}
+
+
+/**
+ \brief Get FPSCR
+ \details Returns the current value of the Floating Point Status/Control register.
+ \return Floating Point Status/Control register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
+{
+#if (defined(__ARM_FP) && (__ARM_FP >= 1))
+ return __builtin_arm_get_fpscr();
+#else
+ return(0U);
+#endif
+}
+
+
+/**
+ \brief Set FPSCR
+ \details Assigns the given value to the Floating Point Status/Control register.
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if (defined(__ARM_FP) && (__ARM_FP >= 1)) + __builtin_arm_set_fpscr(fpscr); +#else + (void)fpscr; +#endif +} + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + /* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) @@ -489,15 +572,52 @@ __STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) #define __QADD __builtin_arm_qadd #define __QSUB __builtin_arm_qsub -#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ - ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) - -#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ - ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) - -#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) +#define __PKHBT(ARG1,ARG2,ARG3) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __PKHTB(ARG1,ARG2,ARG3) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate) +{ + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) + { + __ASM volatile("sxtb16 %0, %1, ROR %2" : "=r"(result) : "r"(op1), "i"(rotate)); + } + else + { + result = __SXTB16(__ROR(op1, rotate)); + } + return result; +} -#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) +__STATIC_FORCEINLINE uint32_t __SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate) +{ + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) + { + __ASM volatile("sxtab16 %0, %1, %2, ROR %3" : "=r"(result) : "r"(op1), "r"(op2), "i"(rotate)); + } + else + { + result = __SXTAB16(op1, __ROR(op2, rotate)); + } + return result; +} __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) { @@ -508,80 +628,12 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) } #endif /* (__ARM_FEATURE_DSP == 1) */ +/** @} end of group CMSIS_SIMD_intrinsics */ -/* ########################### Core Function Access ########################### */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} - -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. 
- */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ -#if (defined(__ARM_FP) && (__ARM_FP >= 1)) - return __builtin_arm_get_fpscr(); -#else - return(0U); -#endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ -#if (defined(__ARM_FP) && (__ARM_FP >= 1)) - __builtin_arm_set_fpscr(fpscr); -#else - (void)fpscr; -#endif -} - +/** \defgroup CMSIS_Core_intrinsics CMSIS Core Intrinsics + Access to dedicated SIMD instructions + @{ +*/ /** \brief Get CPSR Register \return CPSR Register value @@ -772,4 +824,6 @@ __STATIC_INLINE void __FPU_Enable(void) ); } +/*@} end of group CMSIS_Core_intrinsics */ + #endif /* __CMSIS_ARMCLANG_A_H */ diff --git a/CMSIS/Core/Include/a-profile/cmsis_clang_a.h b/CMSIS/Core/Include/a-profile/cmsis_clang_a.h index 9b80f7bbd..cf237e82b 100644 --- a/CMSIS/Core/Include/a-profile/cmsis_clang_a.h +++ b/CMSIS/Core/Include/a-profile/cmsis_clang_a.h @@ -31,6 +31,11 @@ #error Compiler must support ACLE V2.0 #endif /* (__ARM_ACLE >= 200) */ +/* Fallback for __has_builtin */ +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + /* CMSIS compiler specific defines */ #ifndef __ASM #define __ASM __asm @@ -38,9 +43,6 @@ #ifndef __INLINE #define __INLINE inline #endif -#ifndef __FORCEINLINE - #define __FORCEINLINE __attribute__((always_inline)) -#endif #ifndef __STATIC_INLINE #define __STATIC_INLINE static inline #endif @@ -50,9 +52,6 @@ #ifndef __NO_RETURN #define __NO_RETURN __attribute__((__noreturn__)) #endif -#ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) -#endif #ifndef __USED #define __USED __attribute__((used)) #endif @@ -65,6 +64,9 @@ #ifndef __PACKED_STRUCT #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) #endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif #ifndef __UNALIGNED_UINT16_WRITE #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpacked" @@ -109,14 +111,14 @@ \brief No Operation \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -#define __NOP() __ASM volatile ("nop") +#define __NOP() __nop() /** \brief Wait For Interrupt \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -#define __WFI() __ASM volatile ("wfi":::"memory") +#define __WFI() __wfi() /** @@ -124,14 +126,14 @@ \details Wait For Event is a hint instruction that permits the processor to enter a low-power state until one of a number of events occurs. */ -#define __WFE() __ASM volatile ("wfe":::"memory") +#define __WFE() __wfe() /** \brief Send Event \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. 
*/ -#define __SEV() __ASM volatile ("sev") +#define __SEV() __sev() /** @@ -140,10 +142,7 @@ so that all instructions following the ISB are fetched from cache or memory, after the instruction has been completed. */ -__STATIC_FORCEINLINE void __ISB(void) -{ - __ASM volatile ("isb 0xF":::"memory"); -} +#define __ISB() __isb(0xF) /** @@ -151,10 +150,7 @@ __STATIC_FORCEINLINE void __ISB(void) \details Acts as a special kind of Data Memory Barrier. It completes when all explicit memory accesses before this instruction complete. */ -__STATIC_FORCEINLINE void __DSB(void) -{ - __ASM volatile ("dsb 0xF":::"memory"); -} +#define __DSB() __dsb(0xF) /** @@ -162,10 +158,7 @@ __STATIC_FORCEINLINE void __DSB(void) \details Ensures the apparent order of the explicit memory operations before and after the instruction, without ensuring their completion. */ -__STATIC_FORCEINLINE void __DMB(void) -{ - __ASM volatile ("dmb 0xF":::"memory"); -} +#define __DMB() __dmb(0xF) /** @@ -174,17 +167,7 @@ __STATIC_FORCEINLINE void __DMB(void) \param [in] value Value to reverse \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) -{ -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) - return __builtin_bswap32(value); -#else - uint32_t result; - - __ASM ("rev %0, %1" : "=r" (result) : "r" (value) ); - return result; -#endif -} +#define __REV(value) __rev(value) /** @@ -193,12 +176,7 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value) \param [in] value Value to reverse \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) -{ - uint32_t result; - __ASM ("rev16 %0, %1" : "=r" (result) : "r" (value)); - return result; -} +#define __REV16(value) __rev16(value) /** @@ -207,17 +185,7 @@ __STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) \param [in] value Value to reverse \return Reversed value */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) -{ -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - return (int16_t)__builtin_bswap16(value); -#else - int16_t result; - - __ASM ("revsh %0, %1" : "=r" (result) : "r" (value) ); - return result; -#endif -} +#define __REVSH(value) __revsh(value) /** @@ -227,15 +195,7 @@ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value) \param [in] op2 Number of Bits to rotate \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} +#define __ROR(op1, op2) __ror(op1, op2) /** @@ -254,12 +214,7 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) \param [in] value Value to reverse \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) -{ - uint32_t result; - __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); - return result; -} +#define __RBIT(value) __rbit(value) /** @@ -268,21 +223,34 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) \param [in] value Value to count the leading zeros \return number of leading zeros in value */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) -{ - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". 
- */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); -} +#define __CLZ(value) __clz(value) + + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(value, sat) __ssat(value, sat) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(value, sat) __usat(value, sat) + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex /** @@ -291,13 +259,7 @@ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) \param [in] ptr Pointer to data \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) -{ - uint32_t result; - - __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); - return ((uint8_t) result); /* Add explicit type cast here */ -} +#define __LDREXB (uint8_t)__builtin_arm_ldrex /** @@ -306,13 +268,7 @@ __STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) \param [in] ptr Pointer to data \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) -{ - uint32_t result; - - __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); - return ((uint16_t) result); /* Add explicit type cast here */ -} +#define __LDREXH (uint16_t)__builtin_arm_ldrex /** @@ -321,13 +277,7 @@ __STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) \param [in] ptr Pointer to data \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return(result); -} +#define __LDREXW (uint32_t)__builtin_arm_ldrex /** @@ -338,13 +288,7 @@ __STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) \return 0 Function succeeded \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); -} +#define __STREXB (uint32_t)__builtin_arm_strex /** @@ -355,13 +299,7 @@ __STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) \return 0 Function succeeded \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); -} +#define __STREXH (uint32_t)__builtin_arm_strex /** @@ -372,55 +310,9 @@ __STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) \return 0 Function succeeded \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return(result); -} +#define __STREXW (uint32_t)__builtin_arm_strex -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -__STATIC_FORCEINLINE void __CLREX(void) -{ - __ASM volatile ("clrex" ::: "memory"); -} - -/** - \brief Signed Saturate - \details Saturates a signed value. 
- \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(ARG1, ARG2) \ -__extension__ \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(ARG1, ARG2) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - /** \brief Rotate Right with Extend (32 bit) \details Moves each bit of a bitstring right by one bit. @@ -537,7 +429,7 @@ __STATIC_FORCEINLINE void __enable_irq(void) /** \brief Disable IRQ Interrupts \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. + Can only be executed in Privileged modes. */ __STATIC_FORCEINLINE void __disable_irq(void) { @@ -565,12 +457,13 @@ __STATIC_FORCEINLINE void __disable_fault_irq(void) __ASM volatile ("cpsid f" : : : "memory"); } + /** \brief Get FPSCR \details Returns the current value of the Floating Point Status/Control register. \return Floating Point Status/Control register value */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) { #if (defined(__ARM_FP) && (__ARM_FP >= 1)) return __builtin_arm_get_fpscr(); @@ -599,6 +492,10 @@ __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) /* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) diff --git a/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h b/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h index f8adca1f2..173b98c9f 100644 --- a/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h +++ b/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h @@ -41,9 +41,6 @@ #ifndef __INLINE #define __INLINE inline #endif -#ifndef __FORCEINLINE - #define __FORCEINLINE __attribute__((always_inline)) -#endif #ifndef __STATIC_INLINE #define __STATIC_INLINE static inline #endif @@ -53,9 +50,6 @@ #ifndef __NO_RETURN #define __NO_RETURN __attribute__((__noreturn__)) #endif -#ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) -#endif #ifndef __USED #define __USED __attribute__((used)) #endif @@ -68,6 +62,9 @@ #ifndef __PACKED_STRUCT #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) #endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif #ifndef __UNALIGNED_UINT16_WRITE #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpacked" @@ -116,14 +113,14 @@ \brief No Operation \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -#define __NOP() __ASM volatile ("nop") +#define __NOP() __ASM volatile ("nop") /** \brief Wait For Interrupt \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. 
*/ -#define __WFI() __ASM volatile ("wfi":::"memory") +#define __WFI() __ASM volatile ("wfi":::"memory") /** @@ -131,14 +128,14 @@ \details Wait For Event is a hint instruction that permits the processor to enter a low-power state until one of a number of events occurs. */ -#define __WFE() __ASM volatile ("wfe":::"memory") +#define __WFE() __ASM volatile ("wfe":::"memory") /** \brief Send Event \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -#define __SEV() __ASM volatile ("sev") +#define __SEV() __ASM volatile ("sev") /** @@ -147,7 +144,7 @@ so that all instructions following the ISB are fetched from cache or memory, after the instruction has been completed. */ -__STATIC_FORCEINLINE void __ISB(void) +__STATIC_FORCEINLINE void __ISB(void) { __ASM volatile ("isb 0xF":::"memory"); } @@ -158,7 +155,7 @@ __STATIC_FORCEINLINE void __ISB(void) \details Acts as a special kind of Data Memory Barrier. It completes when all explicit memory accesses before this instruction complete. */ -__STATIC_FORCEINLINE void __DSB(void) +__STATIC_FORCEINLINE void __DSB(void) { __ASM volatile ("dsb 0xF":::"memory"); } @@ -169,7 +166,7 @@ __STATIC_FORCEINLINE void __DSB(void) \details Ensures the apparent order of the explicit memory operations before and after the instruction, without ensuring their completion. */ -__STATIC_FORCEINLINE void __DMB(void) +__STATIC_FORCEINLINE void __DMB(void) { __ASM volatile ("dmb 0xF":::"memory"); } @@ -181,16 +178,9 @@ __STATIC_FORCEINLINE void __DMB(void) \param [in] value Value to reverse \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) { -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) return __builtin_bswap32(value); -#else - uint32_t result; - - __ASM ("rev %0, %1" : "=r" (result) : "r" (value) ); - return result; -#endif } @@ -203,8 +193,9 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value) __STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) { uint32_t result; + __ASM ("rev16 %0, %1" : "=r" (result) : "r" (value)); - return result; + return (result); } @@ -214,16 +205,9 @@ __STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) \param [in] value Value to reverse \return Reversed value */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) { -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) return (int16_t)__builtin_bswap16(value); -#else - int16_t result; - - __ASM ("revsh %0, %1" : "=r" (result) : "r" (value) ); - return result; -#endif } @@ -252,7 +236,7 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) \param [in] value is ignored by the processor. If required, a debugger can use it to store additional information about the breakpoint. */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +#define __BKPT(value) __ASM volatile ("bkpt "#value) /** @@ -261,11 +245,11 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) \param [in] value Value to reverse \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) { uint32_t result; __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); - return result; + return (result); } @@ -295,61 +279,59 @@ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. 
- \param [in] ptr Pointer to data
- \return value of type uint8_t at (*ptr)
+ \brief Signed Saturate
+ \details Saturates a signed value.
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (1..32)
+ \return Saturated value
 */
-__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
-{
- uint32_t result;
+#define __SSAT(ARG1, ARG2) \
+__extension__ \
+({ \
+ int32_t __RES, __ARG1 = (ARG1); \
+ __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
+ __RES; \
+ })
 
-#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
- __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
-#else
- /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
- accepted by assembler. So has to use following less efficient pattern.
- */
- __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
-#endif
- return ((uint8_t) result); /* Add explicit type cast here */
-}
+
+/**
+ \brief Unsigned Saturate
+ \details Saturates an unsigned value.
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (0..31)
+ \return Saturated value
+ */
+#define __USAT(ARG1, ARG2) \
+__extension__ \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
+ __RES; \
+ })
 
 
 /**
- \brief LDR Exclusive (16 bit)
- \details Executes a exclusive LDR instruction for 16 bit values.
- \param [in] ptr Pointer to data
- \return value of type uint16_t at (*ptr)
+ \brief Remove the exclusive lock
+ \details Removes the exclusive lock which is created by LDREX.
 */
-__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
+__STATIC_FORCEINLINE void __CLREX(void)
 {
- uint32_t result;
-
-#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
- __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
-#else
- /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
- accepted by assembler. So has to use following less efficient pattern.
- */
- __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
-#endif
- return ((uint16_t) result); /* Add explicit type cast here */
+ __ASM volatile ("clrex" ::: "memory");
 }
 
 
 /**
- \brief LDR Exclusive (32 bit)
- \details Executes a exclusive LDR instruction for 32 bit values.
+ \brief LDR Exclusive (8 bit)
+ \details Executes an exclusive LDR instruction for 8 bit value.
\param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return(result); + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); + return ((uint8_t) result); /* Add explicit type cast here */ } @@ -361,88 +343,78 @@ __STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) \return 0 Function succeeded \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return (result); } /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); + return ((uint16_t)result); /* Add explicit type cast here */ } /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. \param [in] value Value to store \param [in] ptr Pointer to location \return 0 Function succeeded \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return(result); + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return (result); } /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __CLREX(void) +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) { - __ASM volatile ("clrex" ::: "memory"); + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return (result); } + /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (1..32) - \return Saturated value + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -#define __SSAT(ARG1, ARG2) \ -__extension__ \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +{ + uint32_t result; + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return (result); +} -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(ARG1, ARG2) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) /** \brief Rotate Right with Extend (32 bit) @@ -567,6 +539,7 @@ __STATIC_FORCEINLINE void __disable_irq(void) __ASM volatile ("cpsid i" : : : "memory"); } + /** \brief Enable FIQ \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. @@ -593,12 +566,12 @@ __STATIC_FORCEINLINE void __disable_fault_irq(void) \details Returns the current value of the Floating Point Status/Control register. \return Floating Point Status/Control register value */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) { #if (defined(__ARM_FP) && (__ARM_FP >= 1)) - return __builtin_arm_get_fpscr(); + return (__builtin_arm_get_fpscr()); #else - return(0U); + return (0U); #endif } @@ -618,10 +591,14 @@ __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) } -/*@} end of CMSIS_Core_RegAccFunctions */ +/** @} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) @@ -929,7 +906,7 @@ __STATIC_INLINE void __FPU_Enable(void) __set_FPSCR(fpscr & 0x00086060ul); } -/*@} end of group CMSIS_Core_intrinsics */ +/** @} end of group CMSIS_Core_intrinsics */ #pragma GCC diagnostic pop diff --git a/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h b/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h index f9d03db9a..8d83a79ad 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h @@ -36,13 +36,13 @@ #define __ASM __asm #endif #ifndef __INLINE - #define __INLINE __inline + #define __INLINE inline #endif #ifndef __STATIC_INLINE - #define __STATIC_INLINE static __inline + #define __STATIC_INLINE static inline #endif #ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline #endif #ifndef __NO_RETURN #define __NO_RETURN __attribute__((__noreturn__)) diff --git a/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h b/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h index f34ae8129..d92aabd83 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h @@ -337,6 +337,7 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) return (op1 >> op2) | (op1 << (32U - op2)); } + /** \brief Breakpoint \details Causes the processor to enter Debug state.
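
Usage sketch (reviewer note, not part of the patch): with the three A-profile headers now exposing the same intrinsic names, code like the following builds unchanged under Arm Compiler 6, LLVM/Clang, and GCC. The device header name "ARMCA9.h" and both helper functions are illustrative assumptions, not taken from this change:

  #include <stdint.h>
  #include "ARMCA9.h"  /* hypothetical device header that includes one of the cmsis_*_a.h files above */

  /* Atomically add 'inc' to *addr using the LDREX/STREX intrinsics.
     __STREXW returns 0 on success, 1 if the exclusive monitor was lost. */
  static uint32_t atomic_add_u32(volatile uint32_t *addr, uint32_t inc)
  {
    uint32_t val;
    do {
      val = __LDREXW(addr) + inc;        /* load-exclusive, then modify */
    } while (__STREXW(val, addr) != 0U); /* store-exclusive, retry on contention */
    __DMB();                             /* order the update against later accesses */
    return val;
  }

  /* Clamp a 32-bit intermediate to the signed 16-bit range using __SSAT. */
  static int16_t clamp_q15(int32_t sample)
  {
    return (int16_t)__SSAT(sample, 16); /* saturates to -32768..32767 */
  }

On armclang and LLVM/Clang these now map to __builtin_arm_ldrex/__builtin_arm_strex and the saturation builtins; on GCC they expand to the inline-assembly implementations kept in cmsis_gcc_a.h, so calling code stays toolchain-neutral.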