From 9e9fad760a2d05d056f8a789892141a509400225 Mon Sep 17 00:00:00 2001
From: Markus Klein
Date: Sun, 4 Dec 2022 18:14:19 +0100
Subject: [PATCH] * Retire the Core_A folder and use a single Core folder for all processor core headers.

* Separate folders for different architecture profiles (A-profile, R-profile and M-profile)
* Top-level compiler headers will exist in the Core folder (no separate Compiler folder is needed).
** The cmsis_compiler.h header will continue to figure out which compiler toolchain is being used.
** The compiler toolchain specific headers in the Core folder will contain code common to all architecture profiles.
** Compiler headers specific to each architecture profile can reside within the new architecture profile folders.
** The second-level files like cmsis_gcc.h are named cmsis_gcc_a.h, cmsis_gcc_r.h and cmsis_gcc_m.h to avoid having multiple files with the same name.
* For any features that are common to more than one architecture profile, e.g. the GIC is common to A-profile and R-profile, we can include the GIC header in each architecture profile folder, but only one of these files contains the code to avoid duplication. The file with the same name that doesn't contain the code simply includes the code from the other folder - this should hopefully be clear to developers.
* Removed deprecated core_armv8xxx.h files. The files were added because no specific Armv8-M device was available when Armv8-M support was added.
* Removed deprecated file cmsis_armcc_a.h. This compiler support is obsolete.

Add initial support for some Cortex-A and Cortex-R devices
---
 CMSIS/Core/Include/a-profile/armv7a.h | 2279 +++++++++++++
 .../a-profile/{cmsis_cp15.h => armv7a_cp15.h} | 4 +-
 CMSIS/Core/Include/a-profile/armv8a.h | 647 ++++
 .../Include/a-profile/armv8a_system_control.h | 143 +
 .../Core/Include/a-profile/cmsis_armclang_a.h | 507 +--
 CMSIS/Core/Include/a-profile/cmsis_clang_a.h | 168 +
 CMSIS/Core/Include/a-profile/cmsis_gcc_a.h | 811 +----
 CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h | 398 +--
 CMSIS/Core/Include/a-profile/gicv2.h | 757 +++++
 CMSIS/Core/Include/a-profile/irq_ctrl.h | 2 +-
 CMSIS/Core/Include/cmsis_compiler.h | 62 +-
 CMSIS/Core/Include/cmsis_version.h | 34 +-
 CMSIS/Core/Include/core_ca.h | 2928 +----------------
 CMSIS/Core/Include/core_ca35.h | 41 +
 CMSIS/Core/Include/core_ca5.h | 41 +
 CMSIS/Core/Include/core_ca53.h | 41 +
 CMSIS/Core/Include/core_ca55.h | 41 +
 CMSIS/Core/Include/core_ca57.h | 41 +
 CMSIS/Core/Include/core_ca7.h | 41 +
 CMSIS/Core/Include/core_ca9.h | 41 +
 CMSIS/Core/Include/core_cr4.h | 41 +
 CMSIS/Core/Include/core_cr5.h | 41 +
 CMSIS/Core/Include/core_cr52.h | 41 +
 CMSIS/Core/Include/core_cr7.h | 41 +
 CMSIS/Core/Include/core_cr8.h | 41 +
 .../Core/Include/m-profile/cmsis_armclang_m.h | 1779 +++-------
 CMSIS/Core/Include/m-profile/cmsis_clang_m.h | 1782 +++-------
 CMSIS/Core/Include/m-profile/cmsis_gcc_m.h | 1990 +++--------
 CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h | 619 +---
 .../Include/m-profile/cmsis_tiarmclang_m.h | 1382 ++------
 CMSIS/Core/Include/r-profile/armv7r.h | 471 +++
 CMSIS/Core/Include/r-profile/armv7r_cp15.h | 27 +
 CMSIS/Core/Include/r-profile/armv8r.h | 355 ++
 .../Include/r-profile/armv8r_system_control.h | 27 +
 .../Core/Include/r-profile/cmsis_armclang_r.h | 161 +
 CMSIS/Core/Include/r-profile/cmsis_clang_r.h | 160 +
 CMSIS/Core/Include/r-profile/cmsis_gcc_r.h | 162 +
 CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h | 39 +
 CMSIS/Core/Include/r-profile/gicv2.h | 26 +
 .../Target/CA5/RTE/Device/ARMCA5/mmu_ARMCA5.c | 2 +-
.../Target/CA7/RTE/Device/ARMCA7/mmu_ARMCA7.c | 2 +- .../Target/CA9/RTE/Device/ARMCA9/mmu_ARMCA9.c | 2 +- CMSIS/DoxyGen/Core/Core.dxy.in | 2 +- CMSIS/DoxyGen/Core_A/Core_A.dxy.in | 18 +- CMSIS/DoxyGen/Core_A/src/Overview.txt | 2 +- CMSIS/DoxyGen/Core_A/src/Template.txt | 40 +- .../Core_A/src/{core_ca.txt => arm7a.txt} | 4 +- README.md | 3 +- 48 files changed, 7904 insertions(+), 10383 deletions(-) create mode 100644 CMSIS/Core/Include/a-profile/armv7a.h rename CMSIS/Core/Include/a-profile/{cmsis_cp15.h => armv7a_cp15.h} (99%) create mode 100644 CMSIS/Core/Include/a-profile/armv8a.h create mode 100644 CMSIS/Core/Include/a-profile/armv8a_system_control.h create mode 100644 CMSIS/Core/Include/a-profile/cmsis_clang_a.h create mode 100644 CMSIS/Core/Include/a-profile/gicv2.h create mode 100644 CMSIS/Core/Include/core_ca35.h create mode 100644 CMSIS/Core/Include/core_ca5.h create mode 100644 CMSIS/Core/Include/core_ca53.h create mode 100644 CMSIS/Core/Include/core_ca55.h create mode 100644 CMSIS/Core/Include/core_ca57.h create mode 100644 CMSIS/Core/Include/core_ca7.h create mode 100644 CMSIS/Core/Include/core_ca9.h create mode 100644 CMSIS/Core/Include/core_cr4.h create mode 100644 CMSIS/Core/Include/core_cr5.h create mode 100644 CMSIS/Core/Include/core_cr52.h create mode 100644 CMSIS/Core/Include/core_cr7.h create mode 100644 CMSIS/Core/Include/core_cr8.h create mode 100644 CMSIS/Core/Include/r-profile/armv7r.h create mode 100644 CMSIS/Core/Include/r-profile/armv7r_cp15.h create mode 100644 CMSIS/Core/Include/r-profile/armv8r.h create mode 100644 CMSIS/Core/Include/r-profile/armv8r_system_control.h create mode 100644 CMSIS/Core/Include/r-profile/cmsis_armclang_r.h create mode 100644 CMSIS/Core/Include/r-profile/cmsis_clang_r.h create mode 100644 CMSIS/Core/Include/r-profile/cmsis_gcc_r.h create mode 100644 CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h create mode 100644 CMSIS/Core/Include/r-profile/gicv2.h rename CMSIS/DoxyGen/Core_A/src/{core_ca.txt => arm7a.txt} (99%) diff --git a/CMSIS/Core/Include/a-profile/armv7a.h b/CMSIS/Core/Include/a-profile/armv7a.h new file mode 100644 index 000000000..d8ba59d34 --- /dev/null +++ b/CMSIS/Core/Include/a-profile/armv7a.h @@ -0,0 +1,2279 @@ +/**************************************************************************//** + * @file armv7a.h + * @brief CMSIS Cortex-A Core Peripheral Access Layer Header File for ARMv7-A + * @version V1.0.8 + * @date 23. March 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __ARM_V7A_GENERIC +#define __ARM_V7A_GENERIC + +#ifdef __cplusplus + extern "C" { +#endif + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ + /** + \ingroup ARMv7-A + @{ + */ + +#include "cmsis_version.h" + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TMS470__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ +#include "armv7a_cp15.h" + +#ifdef __cplusplus +} +#endif + +#endif /* __ARM_V7A_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __ARM_V7A_DEPENDANT +#define __ARM_V7A_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + + /* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __GIC_PRESENT + #define __GIC_PRESENT 1U + #warning "__GIC_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __TIM_PRESENT + #define __TIM_PRESENT 1U + #warning "__TIM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __L2C_PRESENT + #define __L2C_PRESENT 0U + #warning "__L2C_PRESENT not defined in device header file; using default!" 
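+    /* Note (illustrative): a device that does integrate an L2C-310, for example a Cortex-A9
+       based part, would typically define __L2C_PRESENT as 1U in its device header and also
+       provide L2C_310_BASE so that the L2C_310 register accessors further below can be used. */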
+ #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +#ifdef __cplusplus + #define __I volatile /*!< \brief Defines 'read only' permissions */ +#else + #define __I volatile const /*!< \brief Defines 'read only' permissions */ +#endif +#define __O volatile /*!< \brief Defines 'write only' permissions */ +#define __IO volatile /*!< \brief Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */ +#define __OM volatile /*!< \brief Defines 'write only' structure member permissions */ +#define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */ +#define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas + +/** @} end of group ARMv7-A */ + + + + /******************************************************************************* + * Register Abstraction + Core Register contain: + - CPSR + - CP15 Registers + - L2C-310 Cache Controller + - Generic Interrupt Controller Distributor + - Generic Interrupt Controller Interface + ******************************************************************************/ + +/* Core Register CPSR */ +typedef union +{ + struct + { + uint32_t M:5; /*!< \brief bit: 0.. 4 Mode field */ + uint32_t T:1; /*!< \brief bit: 5 Thumb execution state bit */ + uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */ + uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */ + uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */ + uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */ + uint32_t IT1:6; /*!< \brief bit: 10..15 If-Then execution state bits 2-7 */ + uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */ + RESERVED(0:4, uint32_t) + uint32_t J:1; /*!< \brief bit: 24 Jazelle bit */ + uint32_t IT0:2; /*!< \brief bit: 25..26 If-Then execution state bits 0-1 */ + uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */ + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CPSR_Type; + + + +/* CPSR Register Definitions */ +#define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */ +#define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */ + +#define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */ +#define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */ + +#define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */ +#define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */ + +#define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */ +#define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */ + +#define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */ +#define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */ + +#define CPSR_IT0_Pos 25U /*!< \brief CPSR: IT0 Position */ +#define CPSR_IT0_Msk (3UL << CPSR_IT0_Pos) /*!< \brief CPSR: IT0 Mask */ + +#define CPSR_J_Pos 24U /*!< \brief CPSR: J Position */ +#define CPSR_J_Msk (1UL << CPSR_J_Pos) /*!< \brief CPSR: J Mask */ + +#define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */ +#define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */ + +#define CPSR_IT1_Pos 10U /*!< \brief CPSR: IT1 Position */ +#define 
CPSR_IT1_Msk (0x3FUL << CPSR_IT1_Pos) /*!< \brief CPSR: IT1 Mask */ + +#define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */ +#define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */ + +#define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */ +#define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */ + +#define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */ +#define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */ + +#define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */ +#define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */ + +#define CPSR_T_Pos 5U /*!< \brief CPSR: T Position */ +#define CPSR_T_Msk (1UL << CPSR_T_Pos) /*!< \brief CPSR: T Mask */ + +#define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */ +#define CPSR_M_Msk (0x1FUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */ + +#define CPSR_M_USR 0x10U /*!< \brief CPSR: M User mode (PL0) */ +#define CPSR_M_FIQ 0x11U /*!< \brief CPSR: M Fast Interrupt mode (PL1) */ +#define CPSR_M_IRQ 0x12U /*!< \brief CPSR: M Interrupt mode (PL1) */ +#define CPSR_M_SVC 0x13U /*!< \brief CPSR: M Supervisor mode (PL1) */ +#define CPSR_M_MON 0x16U /*!< \brief CPSR: M Monitor mode (PL1) */ +#define CPSR_M_ABT 0x17U /*!< \brief CPSR: M Abort mode (PL1) */ +#define CPSR_M_HYP 0x1AU /*!< \brief CPSR: M Hypervisor mode (PL2) */ +#define CPSR_M_UND 0x1BU /*!< \brief CPSR: M Undefined mode (PL1) */ +#define CPSR_M_SYS 0x1FU /*!< \brief CPSR: M System mode (PL1) */ + +/* CP15 Register SCTLR */ +typedef union +{ + struct + { + uint32_t M:1; /*!< \brief bit: 0 MMU enable */ + uint32_t A:1; /*!< \brief bit: 1 Alignment check enable */ + uint32_t C:1; /*!< \brief bit: 2 Cache enable */ + RESERVED(0:2, uint32_t) + uint32_t CP15BEN:1; /*!< \brief bit: 5 CP15 barrier enable */ + RESERVED(1:1, uint32_t) + uint32_t B:1; /*!< \brief bit: 7 Endianness model */ + RESERVED(2:2, uint32_t) + uint32_t SW:1; /*!< \brief bit: 10 SWP and SWPB enable */ + uint32_t Z:1; /*!< \brief bit: 11 Branch prediction enable */ + uint32_t I:1; /*!< \brief bit: 12 Instruction cache enable */ + uint32_t V:1; /*!< \brief bit: 13 Vectors bit */ + uint32_t RR:1; /*!< \brief bit: 14 Round Robin select */ + RESERVED(3:2, uint32_t) + uint32_t HA:1; /*!< \brief bit: 17 Hardware Access flag enable */ + RESERVED(4:1, uint32_t) + uint32_t WXN:1; /*!< \brief bit: 19 Write permission implies XN */ + uint32_t UWXN:1; /*!< \brief bit: 20 Unprivileged write permission implies PL1 XN */ + uint32_t FI:1; /*!< \brief bit: 21 Fast interrupts configuration enable */ + uint32_t U:1; /*!< \brief bit: 22 Alignment model */ + RESERVED(5:1, uint32_t) + uint32_t VE:1; /*!< \brief bit: 24 Interrupt Vectors Enable */ + uint32_t EE:1; /*!< \brief bit: 25 Exception Endianness */ + RESERVED(6:1, uint32_t) + uint32_t NMFI:1; /*!< \brief bit: 27 Non-maskable FIQ (NMFI) support */ + uint32_t TRE:1; /*!< \brief bit: 28 TEX remap enable. 
*/ + uint32_t AFE:1; /*!< \brief bit: 29 Access flag enable */ + uint32_t TE:1; /*!< \brief bit: 30 Thumb Exception enable */ + RESERVED(7:1, uint32_t) + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} SCTLR_Type; + +#define SCTLR_TE_Pos 30U /*!< \brief SCTLR: TE Position */ +#define SCTLR_TE_Msk (1UL << SCTLR_TE_Pos) /*!< \brief SCTLR: TE Mask */ + +#define SCTLR_AFE_Pos 29U /*!< \brief SCTLR: AFE Position */ +#define SCTLR_AFE_Msk (1UL << SCTLR_AFE_Pos) /*!< \brief SCTLR: AFE Mask */ + +#define SCTLR_TRE_Pos 28U /*!< \brief SCTLR: TRE Position */ +#define SCTLR_TRE_Msk (1UL << SCTLR_TRE_Pos) /*!< \brief SCTLR: TRE Mask */ + +#define SCTLR_NMFI_Pos 27U /*!< \brief SCTLR: NMFI Position */ +#define SCTLR_NMFI_Msk (1UL << SCTLR_NMFI_Pos) /*!< \brief SCTLR: NMFI Mask */ + +#define SCTLR_EE_Pos 25U /*!< \brief SCTLR: EE Position */ +#define SCTLR_EE_Msk (1UL << SCTLR_EE_Pos) /*!< \brief SCTLR: EE Mask */ + +#define SCTLR_VE_Pos 24U /*!< \brief SCTLR: VE Position */ +#define SCTLR_VE_Msk (1UL << SCTLR_VE_Pos) /*!< \brief SCTLR: VE Mask */ + +#define SCTLR_U_Pos 22U /*!< \brief SCTLR: U Position */ +#define SCTLR_U_Msk (1UL << SCTLR_U_Pos) /*!< \brief SCTLR: U Mask */ + +#define SCTLR_FI_Pos 21U /*!< \brief SCTLR: FI Position */ +#define SCTLR_FI_Msk (1UL << SCTLR_FI_Pos) /*!< \brief SCTLR: FI Mask */ + +#define SCTLR_UWXN_Pos 20U /*!< \brief SCTLR: UWXN Position */ +#define SCTLR_UWXN_Msk (1UL << SCTLR_UWXN_Pos) /*!< \brief SCTLR: UWXN Mask */ + +#define SCTLR_WXN_Pos 19U /*!< \brief SCTLR: WXN Position */ +#define SCTLR_WXN_Msk (1UL << SCTLR_WXN_Pos) /*!< \brief SCTLR: WXN Mask */ + +#define SCTLR_HA_Pos 17U /*!< \brief SCTLR: HA Position */ +#define SCTLR_HA_Msk (1UL << SCTLR_HA_Pos) /*!< \brief SCTLR: HA Mask */ + +#define SCTLR_RR_Pos 14U /*!< \brief SCTLR: RR Position */ +#define SCTLR_RR_Msk (1UL << SCTLR_RR_Pos) /*!< \brief SCTLR: RR Mask */ + +#define SCTLR_V_Pos 13U /*!< \brief SCTLR: V Position */ +#define SCTLR_V_Msk (1UL << SCTLR_V_Pos) /*!< \brief SCTLR: V Mask */ + +#define SCTLR_I_Pos 12U /*!< \brief SCTLR: I Position */ +#define SCTLR_I_Msk (1UL << SCTLR_I_Pos) /*!< \brief SCTLR: I Mask */ + +#define SCTLR_Z_Pos 11U /*!< \brief SCTLR: Z Position */ +#define SCTLR_Z_Msk (1UL << SCTLR_Z_Pos) /*!< \brief SCTLR: Z Mask */ + +#define SCTLR_SW_Pos 10U /*!< \brief SCTLR: SW Position */ +#define SCTLR_SW_Msk (1UL << SCTLR_SW_Pos) /*!< \brief SCTLR: SW Mask */ + +#define SCTLR_B_Pos 7U /*!< \brief SCTLR: B Position */ +#define SCTLR_B_Msk (1UL << SCTLR_B_Pos) /*!< \brief SCTLR: B Mask */ + +#define SCTLR_CP15BEN_Pos 5U /*!< \brief SCTLR: CP15BEN Position */ +#define SCTLR_CP15BEN_Msk (1UL << SCTLR_CP15BEN_Pos) /*!< \brief SCTLR: CP15BEN Mask */ + +#define SCTLR_C_Pos 2U /*!< \brief SCTLR: C Position */ +#define SCTLR_C_Msk (1UL << SCTLR_C_Pos) /*!< \brief SCTLR: C Mask */ + +#define SCTLR_A_Pos 1U /*!< \brief SCTLR: A Position */ +#define SCTLR_A_Msk (1UL << SCTLR_A_Pos) /*!< \brief SCTLR: A Mask */ + +#define SCTLR_M_Pos 0U /*!< \brief SCTLR: M Position */ +#define SCTLR_M_Msk (1UL << SCTLR_M_Pos) /*!< \brief SCTLR: M Mask */ + +/* CP15 Register ACTLR */ +typedef union +{ +#if __CORTEX_A == 5 || defined(DOXYGEN) + /** \brief Structure used for bit access on Cortex-A5 */ + struct + { + uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */ + RESERVED(0:5, uint32_t) + uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ + uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache 
control */ + RESERVED(1:2, uint32_t) + uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */ + uint32_t DWBST:1; /*!< \brief bit: 11 AXI data write bursts to Normal memory */ + uint32_t RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */ + uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */ + uint32_t BP:2; /*!< \brief bit:16..15 Branch prediction policy */ + uint32_t RSDIS:1; /*!< \brief bit: 17 Disable return stack operation */ + uint32_t BTDIS:1; /*!< \brief bit: 18 Disable indirect Branch Target Address Cache (BTAC) */ + RESERVED(3:9, uint32_t) + uint32_t DBDI:1; /*!< \brief bit: 28 Disable branch dual issue */ + RESERVED(7:3, uint32_t) + } b; +#endif +#if __CORTEX_A == 7 || defined(DOXYGEN) + /** \brief Structure used for bit access on Cortex-A7 */ + struct + { + RESERVED(0:6, uint32_t) + uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ + RESERVED(1:3, uint32_t) + uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */ + uint32_t L2RADIS:1; /*!< \brief bit: 11 L2 Data Cache read-allocate mode disable */ + uint32_t L1RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */ + uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */ + uint32_t DDVM:1; /*!< \brief bit: 15 Disable Distributed Virtual Memory (DVM) transactions */ + RESERVED(3:12, uint32_t) + uint32_t DDI:1; /*!< \brief bit: 28 Disable dual issue */ + RESERVED(7:3, uint32_t) + } b; +#endif +#if __CORTEX_A == 9 || defined(DOXYGEN) + /** \brief Structure used for bit access on Cortex-A9 */ + struct + { + uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */ + RESERVED(0:1, uint32_t) + uint32_t L1PE:1; /*!< \brief bit: 2 Dside prefetch */ + uint32_t WFLZM:1; /*!< \brief bit: 3 Cache and TLB maintenance broadcast */ + RESERVED(1:2, uint32_t) + uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ + uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache control */ + uint32_t AOW:1; /*!< \brief bit: 8 Enable allocation in one cache way only */ + uint32_t PARITY:1; /*!< \brief bit: 9 Support for parity checking, if implemented */ + RESERVED(7:22, uint32_t) + } b; +#endif + uint32_t w; /*!< \brief Type used for word access */ +} ACTLR_Type; + +#define ACTLR_DDI_Pos 28U /*!< \brief ACTLR: DDI Position */ +#define ACTLR_DDI_Msk (1UL << ACTLR_DDI_Pos) /*!< \brief ACTLR: DDI Mask */ + +#define ACTLR_DBDI_Pos 28U /*!< \brief ACTLR: DBDI Position */ +#define ACTLR_DBDI_Msk (1UL << ACTLR_DBDI_Pos) /*!< \brief ACTLR: DBDI Mask */ + +#define ACTLR_BTDIS_Pos 18U /*!< \brief ACTLR: BTDIS Position */ +#define ACTLR_BTDIS_Msk (1UL << ACTLR_BTDIS_Pos) /*!< \brief ACTLR: BTDIS Mask */ + +#define ACTLR_RSDIS_Pos 17U /*!< \brief ACTLR: RSDIS Position */ +#define ACTLR_RSDIS_Msk (1UL << ACTLR_RSDIS_Pos) /*!< \brief ACTLR: RSDIS Mask */ + +#define ACTLR_BP_Pos 15U /*!< \brief ACTLR: BP Position */ +#define ACTLR_BP_Msk (3UL << ACTLR_BP_Pos) /*!< \brief ACTLR: BP Mask */ + +#define ACTLR_DDVM_Pos 15U /*!< \brief ACTLR: DDVM Position */ +#define ACTLR_DDVM_Msk (1UL << ACTLR_DDVM_Pos) /*!< \brief ACTLR: DDVM Mask */ + +#define ACTLR_L1PCTL_Pos 13U /*!< \brief ACTLR: L1PCTL Position */ +#define ACTLR_L1PCTL_Msk (3UL << ACTLR_L1PCTL_Pos) /*!< \brief ACTLR: L1PCTL Mask */ + +#define ACTLR_RADIS_Pos 12U /*!< \brief ACTLR: RADIS Position */ +#define ACTLR_RADIS_Msk (1UL << ACTLR_RADIS_Pos) /*!< \brief ACTLR: RADIS Mask */ + +#define 
ACTLR_L1RADIS_Pos 12U /*!< \brief ACTLR: L1RADIS Position */ +#define ACTLR_L1RADIS_Msk (1UL << ACTLR_L1RADIS_Pos) /*!< \brief ACTLR: L1RADIS Mask */ + +#define ACTLR_DWBST_Pos 11U /*!< \brief ACTLR: DWBST Position */ +#define ACTLR_DWBST_Msk (1UL << ACTLR_DWBST_Pos) /*!< \brief ACTLR: DWBST Mask */ + +#define ACTLR_L2RADIS_Pos 11U /*!< \brief ACTLR: L2RADIS Position */ +#define ACTLR_L2RADIS_Msk (1UL << ACTLR_L2RADIS_Pos) /*!< \brief ACTLR: L2RADIS Mask */ + +#define ACTLR_DODMBS_Pos 10U /*!< \brief ACTLR: DODMBS Position */ +#define ACTLR_DODMBS_Msk (1UL << ACTLR_DODMBS_Pos) /*!< \brief ACTLR: DODMBS Mask */ + +#define ACTLR_PARITY_Pos 9U /*!< \brief ACTLR: PARITY Position */ +#define ACTLR_PARITY_Msk (1UL << ACTLR_PARITY_Pos) /*!< \brief ACTLR: PARITY Mask */ + +#define ACTLR_AOW_Pos 8U /*!< \brief ACTLR: AOW Position */ +#define ACTLR_AOW_Msk (1UL << ACTLR_AOW_Pos) /*!< \brief ACTLR: AOW Mask */ + +#define ACTLR_EXCL_Pos 7U /*!< \brief ACTLR: EXCL Position */ +#define ACTLR_EXCL_Msk (1UL << ACTLR_EXCL_Pos) /*!< \brief ACTLR: EXCL Mask */ + +#define ACTLR_SMP_Pos 6U /*!< \brief ACTLR: SMP Position */ +#define ACTLR_SMP_Msk (1UL << ACTLR_SMP_Pos) /*!< \brief ACTLR: SMP Mask */ + +#define ACTLR_WFLZM_Pos 3U /*!< \brief ACTLR: WFLZM Position */ +#define ACTLR_WFLZM_Msk (1UL << ACTLR_WFLZM_Pos) /*!< \brief ACTLR: WFLZM Mask */ + +#define ACTLR_L1PE_Pos 2U /*!< \brief ACTLR: L1PE Position */ +#define ACTLR_L1PE_Msk (1UL << ACTLR_L1PE_Pos) /*!< \brief ACTLR: L1PE Mask */ + +#define ACTLR_FW_Pos 0U /*!< \brief ACTLR: FW Position */ +#define ACTLR_FW_Msk (1UL << ACTLR_FW_Pos) /*!< \brief ACTLR: FW Mask */ + +/* CP15 Register CPACR */ +typedef union +{ + struct + { + uint32_t CP0:2; /*!< \brief bit: 0..1 Access rights for coprocessor 0 */ + uint32_t CP1:2; /*!< \brief bit: 2..3 Access rights for coprocessor 1 */ + uint32_t CP2:2; /*!< \brief bit: 4..5 Access rights for coprocessor 2 */ + uint32_t CP3:2; /*!< \brief bit: 6..7 Access rights for coprocessor 3 */ + uint32_t CP4:2; /*!< \brief bit: 8..9 Access rights for coprocessor 4 */ + uint32_t CP5:2; /*!< \brief bit:10..11 Access rights for coprocessor 5 */ + uint32_t CP6:2; /*!< \brief bit:12..13 Access rights for coprocessor 6 */ + uint32_t CP7:2; /*!< \brief bit:14..15 Access rights for coprocessor 7 */ + uint32_t CP8:2; /*!< \brief bit:16..17 Access rights for coprocessor 8 */ + uint32_t CP9:2; /*!< \brief bit:18..19 Access rights for coprocessor 9 */ + uint32_t CP10:2; /*!< \brief bit:20..21 Access rights for coprocessor 10 */ + uint32_t CP11:2; /*!< \brief bit:22..23 Access rights for coprocessor 11 */ + uint32_t CP12:2; /*!< \brief bit:24..25 Access rights for coprocessor 11 */ + uint32_t CP13:2; /*!< \brief bit:26..27 Access rights for coprocessor 11 */ + uint32_t TRCDIS:1; /*!< \brief bit: 28 Disable CP14 access to trace registers */ + RESERVED(0:1, uint32_t) + uint32_t D32DIS:1; /*!< \brief bit: 30 Disable use of registers D16-D31 of the VFP register file */ + uint32_t ASEDIS:1; /*!< \brief bit: 31 Disable Advanced SIMD Functionality */ + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CPACR_Type; + +#define CPACR_ASEDIS_Pos 31U /*!< \brief CPACR: ASEDIS Position */ +#define CPACR_ASEDIS_Msk (1UL << CPACR_ASEDIS_Pos) /*!< \brief CPACR: ASEDIS Mask */ + +#define CPACR_D32DIS_Pos 30U /*!< \brief CPACR: D32DIS Position */ +#define CPACR_D32DIS_Msk (1UL << CPACR_D32DIS_Pos) /*!< \brief CPACR: D32DIS Mask */ + +#define CPACR_TRCDIS_Pos 28U /*!< \brief CPACR: D32DIS Position */ 
+#define CPACR_TRCDIS_Msk         (1UL << CPACR_TRCDIS_Pos)       /*!< \brief CPACR: TRCDIS Mask */
+
+#define CPACR_CP_Pos_(n)         (n*2U)                          /*!< \brief CPACR: CPn Position */
+#define CPACR_CP_Msk_(n)         (3UL << CPACR_CP_Pos_(n))       /*!< \brief CPACR: CPn Mask */
+
+#define CPACR_CP_NA              0U                              /*!< \brief CPACR CPn field: Access denied. */
+#define CPACR_CP_PL1             1U                              /*!< \brief CPACR CPn field: Accessible from PL1 only. */
+#define CPACR_CP_FA              3U                              /*!< \brief CPACR CPn field: Full access. */
+
+/* CP15 Register DFSR */
+typedef union
+{
+  struct
+  {
+    uint32_t FS0:4;                    /*!< \brief bit: 0.. 3  Fault Status bits bit 0-3 */
+    uint32_t Domain:4;                 /*!< \brief bit: 4.. 7  Fault on which domain */
+    RESERVED(0:1, uint32_t)
+    uint32_t LPAE:1;                   /*!< \brief bit:     9  Large Physical Address Extension */
+    uint32_t FS1:1;                    /*!< \brief bit:    10  Fault Status bits bit 4 */
+    uint32_t WnR:1;                    /*!< \brief bit:    11  Write not Read bit */
+    uint32_t ExT:1;                    /*!< \brief bit:    12  External abort type */
+    uint32_t CM:1;                     /*!< \brief bit:    13  Cache maintenance fault */
+    RESERVED(1:18, uint32_t)
+  } s;                                 /*!< \brief Structure used for bit access in short format */
+  struct
+  {
+    uint32_t STATUS:6;                 /*!< \brief bit: 0.. 5  Fault Status bits */
+    RESERVED(0:3, uint32_t)
+    uint32_t LPAE:1;                   /*!< \brief bit:     9  Large Physical Address Extension */
+    RESERVED(1:1, uint32_t)
+    uint32_t WnR:1;                    /*!< \brief bit:    11  Write not Read bit */
+    uint32_t ExT:1;                    /*!< \brief bit:    12  External abort type */
+    uint32_t CM:1;                     /*!< \brief bit:    13  Cache maintenance fault */
+    RESERVED(2:18, uint32_t)
+  } l;                                 /*!< \brief Structure used for bit access in long format */
+  uint32_t w;                          /*!< \brief Type used for word access */
+} DFSR_Type;
+
+#define DFSR_CM_Pos              13U                              /*!< \brief DFSR: CM Position */
+#define DFSR_CM_Msk              (1UL << DFSR_CM_Pos)             /*!< \brief DFSR: CM Mask */
+
+#define DFSR_Ext_Pos             12U                              /*!< \brief DFSR: Ext Position */
+#define DFSR_Ext_Msk             (1UL << DFSR_Ext_Pos)            /*!< \brief DFSR: Ext Mask */
+
+#define DFSR_WnR_Pos             11U                              /*!< \brief DFSR: WnR Position */
+#define DFSR_WnR_Msk             (1UL << DFSR_WnR_Pos)            /*!< \brief DFSR: WnR Mask */
+
+#define DFSR_FS1_Pos             10U                              /*!< \brief DFSR: FS1 Position */
+#define DFSR_FS1_Msk             (1UL << DFSR_FS1_Pos)            /*!< \brief DFSR: FS1 Mask */
+
+#define DFSR_LPAE_Pos            9U                               /*!< \brief DFSR: LPAE Position */
+#define DFSR_LPAE_Msk            (1UL << DFSR_LPAE_Pos)           /*!< \brief DFSR: LPAE Mask */
+
+#define DFSR_Domain_Pos          4U                               /*!< \brief DFSR: Domain Position */
+#define DFSR_Domain_Msk          (0xFUL << DFSR_Domain_Pos)       /*!< \brief DFSR: Domain Mask */
+
+#define DFSR_FS0_Pos             0U                               /*!< \brief DFSR: FS0 Position */
+#define DFSR_FS0_Msk             (0xFUL << DFSR_FS0_Pos)          /*!< \brief DFSR: FS0 Mask */
+
+#define DFSR_STATUS_Pos          0U                               /*!< \brief DFSR: STATUS Position */
+#define DFSR_STATUS_Msk          (0x3FUL << DFSR_STATUS_Pos)      /*!< \brief DFSR: STATUS Mask */
+
+/* CP15 Register IFSR */
+typedef union
+{
+  struct
+  {
+    uint32_t FS0:4;                    /*!< \brief bit: 0.. 3  Fault Status bits bit 0-3 */
+    RESERVED(0:5, uint32_t)
+    uint32_t LPAE:1;                   /*!< \brief bit:     9  Large Physical Address Extension */
+    uint32_t FS1:1;                    /*!< \brief bit:    10  Fault Status bits bit 4 */
+    RESERVED(1:1, uint32_t)
+    uint32_t ExT:1;                    /*!< \brief bit:    12  External abort type */
+    RESERVED(2:19, uint32_t)
+  } s;                                 /*!< \brief Structure used for bit access in short format */
+  struct
+  {
+    uint32_t STATUS:6;                 /*!< \brief bit: 0..
5 Fault Status bits */ + RESERVED(0:3, uint32_t) + uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */ + RESERVED(1:2, uint32_t) + uint32_t ExT:1; /*!< \brief bit: 12 External abort type */ + RESERVED(2:19, uint32_t) + } l; /*!< \brief Structure used for bit access in long format */ + uint32_t w; /*!< \brief Type used for word access */ +} IFSR_Type; + +#define IFSR_ExT_Pos 12U /*!< \brief IFSR: ExT Position */ +#define IFSR_ExT_Msk (1UL << IFSR_ExT_Pos) /*!< \brief IFSR: ExT Mask */ + +#define IFSR_FS1_Pos 10U /*!< \brief IFSR: FS1 Position */ +#define IFSR_FS1_Msk (1UL << IFSR_FS1_Pos) /*!< \brief IFSR: FS1 Mask */ + +#define IFSR_LPAE_Pos 9U /*!< \brief IFSR: LPAE Position */ +#define IFSR_LPAE_Msk (0x1UL << IFSR_LPAE_Pos) /*!< \brief IFSR: LPAE Mask */ + +#define IFSR_FS0_Pos 0U /*!< \brief IFSR: FS0 Position */ +#define IFSR_FS0_Msk (0xFUL << IFSR_FS0_Pos) /*!< \brief IFSR: FS0 Mask */ + +#define IFSR_STATUS_Pos 0U /*!< \brief IFSR: STATUS Position */ +#define IFSR_STATUS_Msk (0x3FUL << IFSR_STATUS_Pos) /*!< \brief IFSR: STATUS Mask */ + +/* CP15 Register ISR */ +typedef union +{ + struct + { + RESERVED(0:6, uint32_t) + uint32_t F:1; /*!< \brief bit: 6 FIQ pending bit */ + uint32_t I:1; /*!< \brief bit: 7 IRQ pending bit */ + uint32_t A:1; /*!< \brief bit: 8 External abort pending bit */ + RESERVED(1:23, uint32_t) + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} ISR_Type; + +#define ISR_A_Pos 13U /*!< \brief ISR: A Position */ +#define ISR_A_Msk (1UL << ISR_A_Pos) /*!< \brief ISR: A Mask */ + +#define ISR_I_Pos 12U /*!< \brief ISR: I Position */ +#define ISR_I_Msk (1UL << ISR_I_Pos) /*!< \brief ISR: I Mask */ + +#define ISR_F_Pos 11U /*!< \brief ISR: F Position */ +#define ISR_F_Msk (1UL << ISR_F_Pos) /*!< \brief ISR: F Mask */ + +/* DACR Register */ +#define DACR_D_Pos_(n) (2U*n) /*!< \brief DACR: Dn Position */ +#define DACR_D_Msk_(n) (3UL << DACR_D_Pos_(n)) /*!< \brief DACR: Dn Mask */ +#define DACR_Dn_NOACCESS 0U /*!< \brief DACR Dn field: No access */ +#define DACR_Dn_CLIENT 1U /*!< \brief DACR Dn field: Client */ +#define DACR_Dn_MANAGER 3U /*!< \brief DACR Dn field: Manager */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param [in] field Name of the register bit field. + \param [in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param [in] field Name of the register bit field. + \param [in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + + +/** + \brief Union type to access the L2C_310 Cache Controller. 
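+ \note  The structure is accessed through the L2C_310 pointer defined further below, which
+        assumes that the device header provides the L2C_310_BASE address.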
+*/ +#if (__L2C_PRESENT == 1U) || defined(DOXYGEN) + typedef struct + { + __IM uint32_t CACHE_ID; /*!< \brief Offset: 0x0000 (R/ ) Cache ID Register */ + __IM uint32_t CACHE_TYPE; /*!< \brief Offset: 0x0004 (R/ ) Cache Type Register */ + RESERVED(0[0x3e], uint32_t) + __IOM uint32_t CONTROL; /*!< \brief Offset: 0x0100 (R/W) Control Register */ + __IOM uint32_t AUX_CNT; /*!< \brief Offset: 0x0104 (R/W) Auxiliary Control */ + RESERVED(1[0x3e], uint32_t) + __IOM uint32_t EVENT_CONTROL; /*!< \brief Offset: 0x0200 (R/W) Event Counter Control */ + __IOM uint32_t EVENT_COUNTER1_CONF; /*!< \brief Offset: 0x0204 (R/W) Event Counter 1 Configuration */ + __IOM uint32_t EVENT_COUNTER0_CONF; /*!< \brief Offset: 0x0208 (R/W) Event Counter 1 Configuration */ + RESERVED(2[0x2], uint32_t) + __IOM uint32_t INTERRUPT_MASK; /*!< \brief Offset: 0x0214 (R/W) Interrupt Mask */ + __IM uint32_t MASKED_INT_STATUS; /*!< \brief Offset: 0x0218 (R/ ) Masked Interrupt Status */ + __IM uint32_t RAW_INT_STATUS; /*!< \brief Offset: 0x021c (R/ ) Raw Interrupt Status */ + __OM uint32_t INTERRUPT_CLEAR; /*!< \brief Offset: 0x0220 ( /W) Interrupt Clear */ + RESERVED(3[0x143], uint32_t) + __IOM uint32_t CACHE_SYNC; /*!< \brief Offset: 0x0730 (R/W) Cache Sync */ + RESERVED(4[0xf], uint32_t) + __IOM uint32_t INV_LINE_PA; /*!< \brief Offset: 0x0770 (R/W) Invalidate Line By PA */ + RESERVED(6[2], uint32_t) + __IOM uint32_t INV_WAY; /*!< \brief Offset: 0x077c (R/W) Invalidate by Way */ + RESERVED(5[0xc], uint32_t) + __IOM uint32_t CLEAN_LINE_PA; /*!< \brief Offset: 0x07b0 (R/W) Clean Line by PA */ + RESERVED(7[1], uint32_t) + __IOM uint32_t CLEAN_LINE_INDEX_WAY; /*!< \brief Offset: 0x07b8 (R/W) Clean Line by Index/Way */ + __IOM uint32_t CLEAN_WAY; /*!< \brief Offset: 0x07bc (R/W) Clean by Way */ + RESERVED(8[0xc], uint32_t) + __IOM uint32_t CLEAN_INV_LINE_PA; /*!< \brief Offset: 0x07f0 (R/W) Clean and Invalidate Line by PA */ + RESERVED(9[1], uint32_t) + __IOM uint32_t CLEAN_INV_LINE_INDEX_WAY; /*!< \brief Offset: 0x07f8 (R/W) Clean and Invalidate Line by Index/Way */ + __IOM uint32_t CLEAN_INV_WAY; /*!< \brief Offset: 0x07fc (R/W) Clean and Invalidate by Way */ + RESERVED(10[0x40], uint32_t) + __IOM uint32_t DATA_LOCK_0_WAY; /*!< \brief Offset: 0x0900 (R/W) Data Lockdown 0 by Way */ + __IOM uint32_t INST_LOCK_0_WAY; /*!< \brief Offset: 0x0904 (R/W) Instruction Lockdown 0 by Way */ + __IOM uint32_t DATA_LOCK_1_WAY; /*!< \brief Offset: 0x0908 (R/W) Data Lockdown 1 by Way */ + __IOM uint32_t INST_LOCK_1_WAY; /*!< \brief Offset: 0x090c (R/W) Instruction Lockdown 1 by Way */ + __IOM uint32_t DATA_LOCK_2_WAY; /*!< \brief Offset: 0x0910 (R/W) Data Lockdown 2 by Way */ + __IOM uint32_t INST_LOCK_2_WAY; /*!< \brief Offset: 0x0914 (R/W) Instruction Lockdown 2 by Way */ + __IOM uint32_t DATA_LOCK_3_WAY; /*!< \brief Offset: 0x0918 (R/W) Data Lockdown 3 by Way */ + __IOM uint32_t INST_LOCK_3_WAY; /*!< \brief Offset: 0x091c (R/W) Instruction Lockdown 3 by Way */ + __IOM uint32_t DATA_LOCK_4_WAY; /*!< \brief Offset: 0x0920 (R/W) Data Lockdown 4 by Way */ + __IOM uint32_t INST_LOCK_4_WAY; /*!< \brief Offset: 0x0924 (R/W) Instruction Lockdown 4 by Way */ + __IOM uint32_t DATA_LOCK_5_WAY; /*!< \brief Offset: 0x0928 (R/W) Data Lockdown 5 by Way */ + __IOM uint32_t INST_LOCK_5_WAY; /*!< \brief Offset: 0x092c (R/W) Instruction Lockdown 5 by Way */ + __IOM uint32_t DATA_LOCK_6_WAY; /*!< \brief Offset: 0x0930 (R/W) Data Lockdown 5 by Way */ + __IOM uint32_t INST_LOCK_6_WAY; /*!< \brief Offset: 0x0934 (R/W) Instruction Lockdown 5 by Way */ + __IOM 
uint32_t DATA_LOCK_7_WAY; /*!< \brief Offset: 0x0938 (R/W) Data Lockdown 6 by Way */ + __IOM uint32_t INST_LOCK_7_WAY; /*!< \brief Offset: 0x093c (R/W) Instruction Lockdown 6 by Way */ + RESERVED(11[0x4], uint32_t) + __IOM uint32_t LOCK_LINE_EN; /*!< \brief Offset: 0x0950 (R/W) Lockdown by Line Enable */ + __IOM uint32_t UNLOCK_ALL_BY_WAY; /*!< \brief Offset: 0x0954 (R/W) Unlock All Lines by Way */ + RESERVED(12[0xaa], uint32_t) + __IOM uint32_t ADDRESS_FILTER_START; /*!< \brief Offset: 0x0c00 (R/W) Address Filtering Start */ + __IOM uint32_t ADDRESS_FILTER_END; /*!< \brief Offset: 0x0c04 (R/W) Address Filtering End */ + RESERVED(13[0xce], uint32_t) + __IOM uint32_t DEBUG_CONTROL; /*!< \brief Offset: 0x0f40 (R/W) Debug Control Register */ + } L2C_310_TypeDef; + + #define L2C_310 ((L2C_310_TypeDef *)L2C_310_BASE) /*!< \brief L2C_310 register set access pointer */ +#endif /* #if (__L2C_PRESENT == 1U) || defined(DOXYGEN) */ + +#if (__GIC_PRESENT == 1U) || defined(DOXYGEN) + #include "gicv2.h" +#endif /* (__GIC_PRESENT == 1U) || defined(DOXYGEN) */ + +#if (__TIM_PRESENT == 1U) || defined(DOXYGEN) + #if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) + /** \brief Structure type to access the Private Timer + */ + typedef struct + { + __IOM uint32_t LOAD; //!< \brief Offset: 0x000 (R/W) Private Timer Load Register + __IOM uint32_t COUNTER; //!< \brief Offset: 0x004 (R/W) Private Timer Counter Register + __IOM uint32_t CONTROL; //!< \brief Offset: 0x008 (R/W) Private Timer Control Register + __IOM uint32_t ISR; //!< \brief Offset: 0x00C (R/W) Private Timer Interrupt Status Register + RESERVED(0[4], uint32_t) + __IOM uint32_t WLOAD; //!< \brief Offset: 0x020 (R/W) Watchdog Load Register + __IOM uint32_t WCOUNTER; //!< \brief Offset: 0x024 (R/W) Watchdog Counter Register + __IOM uint32_t WCONTROL; //!< \brief Offset: 0x028 (R/W) Watchdog Control Register + __IOM uint32_t WISR; //!< \brief Offset: 0x02C (R/W) Watchdog Interrupt Status Register + __IOM uint32_t WRESET; //!< \brief Offset: 0x030 (R/W) Watchdog Reset Status Register + __OM uint32_t WDISABLE; //!< \brief Offset: 0x034 ( /W) Watchdog Disable Register + } Timer_Type; + #define PTIM ((Timer_Type *) TIMER_BASE ) /*!< \brief Timer register struct */ + + /* PTIM Control Register */ + #define PTIM_CONTROL_Enable_Pos 0U /*!< PTIM CONTROL: Enable Position */ + #define PTIM_CONTROL_Enable_Msk (0x1U /*<< PTIM_CONTROL_Enable_Pos*/) /*!< PTIM CONTROL: Enable Mask */ + #define PTIM_CONTROL_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_CONTROL_Enable_Pos*/)) & PTIM_CONTROL_Enable_Msk) + + #define PTIM_CONTROL_AutoReload_Pos 1U /*!< PTIM CONTROL: Auto Reload Position */ + #define PTIM_CONTROL_AutoReload_Msk (0x1U << PTIM_CONTROL_AutoReload_Pos) /*!< PTIM CONTROL: Auto Reload Mask */ + #define PTIM_CONTROL_AutoReload(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_AutoReload_Pos)) & PTIM_CONTROL_AutoReload_Msk) + + #define PTIM_CONTROL_IRQenable_Pos 2U /*!< PTIM CONTROL: IRQ Enabel Position */ + #define PTIM_CONTROL_IRQenable_Msk (0x1U << PTIM_CONTROL_IRQenable_Pos) /*!< PTIM CONTROL: IRQ Enabel Mask */ + #define PTIM_CONTROL_IRQenable(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_IRQenable_Pos)) & PTIM_CONTROL_IRQenable_Msk) + + #define PTIM_CONTROL_Prescaler_Pos 8U /*!< PTIM CONTROL: Prescaler Position */ + #define PTIM_CONTROL_Prescaler_Msk (0xFFU << PTIM_CONTROL_Prescaler_Pos) /*!< PTIM CONTROL: Prescaler Mask */ + #define PTIM_CONTROL_Prescaler(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_Prescaler_Pos)) & 
PTIM_CONTROL_Prescaler_Msk) + + /* WCONTROL Watchdog Control Register */ + #define PTIM_WCONTROL_Enable_Pos 0U /*!< PTIM WCONTROL: Enable Position */ + #define PTIM_WCONTROL_Enable_Msk (0x1U /*<< PTIM_WCONTROL_Enable_Pos*/) /*!< PTIM WCONTROL: Enable Mask */ + #define PTIM_WCONTROL_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WCONTROL_Enable_Pos*/)) & PTIM_WCONTROL_Enable_Msk) + + #define PTIM_WCONTROL_AutoReload_Pos 1U /*!< PTIM WCONTROL: Auto Reload Position */ + #define PTIM_WCONTROL_AutoReload_Msk (0x1U << PTIM_WCONTROL_AutoReload_Pos) /*!< PTIM WCONTROL: Auto Reload Mask */ + #define PTIM_WCONTROL_AutoReload(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_AutoReload_Pos)) & PTIM_WCONTROL_AutoReload_Msk) + + #define PTIM_WCONTROL_IRQenable_Pos 2U /*!< PTIM WCONTROL: IRQ Enable Position */ + #define PTIM_WCONTROL_IRQenable_Msk (0x1U << PTIM_WCONTROL_IRQenable_Pos) /*!< PTIM WCONTROL: IRQ Enable Mask */ + #define PTIM_WCONTROL_IRQenable(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_IRQenable_Pos)) & PTIM_WCONTROL_IRQenable_Msk) + + #define PTIM_WCONTROL_Mode_Pos 3U /*!< PTIM WCONTROL: Watchdog Mode Position */ + #define PTIM_WCONTROL_Mode_Msk (0x1U << PTIM_WCONTROL_Mode_Pos) /*!< PTIM WCONTROL: Watchdog Mode Mask */ + #define PTIM_WCONTROL_Mode(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_Mode_Pos)) & PTIM_WCONTROL_Mode_Msk) + + #define PTIM_WCONTROL_Presacler_Pos 8U /*!< PTIM WCONTROL: Prescaler Position */ + #define PTIM_WCONTROL_Presacler_Msk (0xFFU << PTIM_WCONTROL_Presacler_Pos) /*!< PTIM WCONTROL: Prescaler Mask */ + #define PTIM_WCONTROL_Presacler(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_Presacler_Pos)) & PTIM_WCONTROL_Presacler_Msk) + + /* WISR Watchdog Interrupt Status Register */ + #define PTIM_WISR_EventFlag_Pos 0U /*!< PTIM WISR: Event Flag Position */ + #define PTIM_WISR_EventFlag_Msk (0x1U /*<< PTIM_WISR_EventFlag_Pos*/) /*!< PTIM WISR: Event Flag Mask */ + #define PTIM_WISR_EventFlag(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WISR_EventFlag_Pos*/)) & PTIM_WISR_EventFlag_Msk) + + /* WRESET Watchdog Reset Status */ + #define PTIM_WRESET_ResetFlag_Pos 0U /*!< PTIM WRESET: Reset Flag Position */ + #define PTIM_WRESET_ResetFlag_Msk (0x1U /*<< PTIM_WRESET_ResetFlag_Pos*/) /*!< PTIM WRESET: Reset Flag Mask */ + #define PTIM_WRESET_ResetFlag(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WRESET_ResetFlag_Pos*/)) & PTIM_WRESET_ResetFlag_Msk) + + #endif /* ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) */ +#endif /* (__TIM_PRESENT == 1U) || defined(DOXYGEN) */ + + /******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - L1 Cache Functions + - L2C-310 Cache Controller Functions + - PL1 Timer Functions + - GIC Functions + - MMU Functions + ******************************************************************************/ + +/* ########################## L1 Cache functions ################################# */ + +/** \brief Enable Caches by setting I and C bits in SCTLR register. +*/ +__STATIC_FORCEINLINE void L1C_EnableCaches(void) { + __set_SCTLR( __get_SCTLR() | SCTLR_I_Msk | SCTLR_C_Msk); + __ISB(); +} + +/** \brief Disable Caches by clearing I and C bits in SCTLR register. +*/ +__STATIC_FORCEINLINE void L1C_DisableCaches(void) { + __set_SCTLR( __get_SCTLR() & (~SCTLR_I_Msk) & (~SCTLR_C_Msk)); + __ISB(); +} + +/** \brief Enable Branch Prediction by setting Z bit in SCTLR register. 
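+* \details Illustrative enable sequence (an assumption about typical start-up code, not a
+*          requirement imposed by this header):
+* \code
+*   L1C_InvalidateBTAC();   // discard stale predictor entries first
+*   L1C_EnableBTAC();       // then set SCTLR.Z
+* \endcode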
+*/
+__STATIC_FORCEINLINE void L1C_EnableBTAC(void) {
+  __set_SCTLR( __get_SCTLR() | SCTLR_Z_Msk);
+  __ISB();
+}
+
+/** \brief Disable Branch Prediction by clearing Z bit in SCTLR register.
+*/
+__STATIC_FORCEINLINE void L1C_DisableBTAC(void) {
+  __set_SCTLR( __get_SCTLR() & (~SCTLR_Z_Msk));
+  __ISB();
+}
+
+/** \brief Invalidate entire branch predictor array
+*/
+__STATIC_FORCEINLINE void L1C_InvalidateBTAC(void) {
+  __set_BPIALL(0);
+  __DSB();     //ensure completion of the invalidation
+  __ISB();     //ensure instruction fetch path sees new state
+}
+
+/** \brief Invalidate instruction cache line by address.
+* \param [in] va Pointer to instructions to invalidate the cache for.
+*/
+__STATIC_FORCEINLINE void L1C_InvalidateICacheMVA(void *va) {
+  __set_ICIMVAC((uint32_t)va);
+  __DSB();     //ensure completion of the invalidation
+  __ISB();     //ensure instruction fetch path sees new I cache state
+}
+
+/** \brief Invalidate the whole instruction cache
+*/
+__STATIC_FORCEINLINE void L1C_InvalidateICacheAll(void) {
+  __set_ICIALLU(0);
+  __DSB();     //ensure completion of the invalidation
+  __ISB();     //ensure instruction fetch path sees new I cache state
+}
+
+/** \brief Clean data cache line by address.
+* \param [in] va Pointer to data to clean the cache for.
+*/
+__STATIC_FORCEINLINE void L1C_CleanDCacheMVA(void *va) {
+  __set_DCCMVAC((uint32_t)va);
+  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Invalidate data cache line by address.
+* \param [in] va Pointer to data to invalidate the cache for.
+*/
+__STATIC_FORCEINLINE void L1C_InvalidateDCacheMVA(void *va) {
+  __set_DCIMVAC((uint32_t)va);
+  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Clean and Invalidate data cache by address.
+* \param [in] va Pointer to data to clean and invalidate the cache for.
+*/
+__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheMVA(void *va) {
+  __set_DCCIMVAC((uint32_t)va);
+  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Calculate log2 rounded up
+*  - log(0)  => 0
+*  - log(1)  => 0
+*  - log(2)  => 1
+*  - log(3)  => 2
+*  - log(4)  => 2
+*  - log(5)  => 3
+*        :      :
+*  - log(16) => 4
+*  - log(32) => 5
+*        :      :
+* \param [in] n input value parameter
+* \return log2(n)
+*/
+__STATIC_FORCEINLINE uint8_t __log2_up(uint32_t n)
+{
+  if (n < 2U) {
+    return 0U;
+  }
+  uint8_t log = 0U;
+  uint32_t t = n - 1U;   /* ceil(log2(n)) equals the number of bits needed to represent (n - 1) */
+  while(t > 0U)
+  {
+    log++;
+    t >>= 1U;
+  }
+  return log;
+}
+
+/** \brief Apply cache maintenance to given cache level.
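+* \details Reads CCSIDR for the selected cache level to determine the number of sets, the
+*          number of ways and the line length, then issues a DCISW/DCCSW/DCCISW operation
+*          for every set/way combination of that level.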
+* \param [in] level cache level to be maintained +* \param [in] maint 0 - invalidate, 1 - clean, otherwise - invalidate and clean +*/ +__STATIC_FORCEINLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint) +{ + uint32_t Dummy; + uint32_t ccsidr; + uint32_t num_sets; + uint32_t num_ways; + uint32_t shift_way; + uint32_t log2_linesize; + int32_t log2_num_ways; + + Dummy = level << 1U; + /* set csselr, select ccsidr register */ + __set_CSSELR(Dummy); + /* get current ccsidr register */ + ccsidr = __get_CCSIDR(); + num_sets = ((ccsidr & 0x0FFFE000U) >> 13U) + 1U; + num_ways = ((ccsidr & 0x00001FF8U) >> 3U) + 1U; + log2_linesize = (ccsidr & 0x00000007U) + 2U + 2U; + log2_num_ways = __log2_up(num_ways); + if ((log2_num_ways < 0) || (log2_num_ways > 32)) { + return; // FATAL ERROR + } + shift_way = 32U - (uint32_t)log2_num_ways; + for(int32_t way = num_ways-1; way >= 0; way--) + { + for(int32_t set = num_sets-1; set >= 0; set--) + { + Dummy = (level << 1U) | (((uint32_t)set) << log2_linesize) | (((uint32_t)way) << shift_way); + switch (maint) + { + case 0U: __set_DCISW(Dummy); break; + case 1U: __set_DCCSW(Dummy); break; + default: __set_DCCISW(Dummy); break; + } + } + } + __DMB(); +} + +/** \brief Clean and Invalidate the entire data or unified cache +* Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency +* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean +*/ +__STATIC_FORCEINLINE void L1C_CleanInvalidateCache(uint32_t op) { + uint32_t clidr; + uint32_t cache_type; + clidr = __get_CLIDR(); + for(uint32_t i = 0U; i<7U; i++) + { + cache_type = (clidr >> i*3U) & 0x7UL; + if ((cache_type >= 2U) && (cache_type <= 4U)) + { + __L1C_MaintainDCacheSetWay(i, op); + } + } +} + +/** \brief Clean and Invalidate the entire data or unified cache +* Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency +* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean +* \deprecated Use generic L1C_CleanInvalidateCache instead. +*/ +CMSIS_DEPRECATED +__STATIC_FORCEINLINE void __L1C_CleanInvalidateCache(uint32_t op) { + L1C_CleanInvalidateCache(op); +} + +/** \brief Invalidate the whole data cache. +*/ +__STATIC_FORCEINLINE void L1C_InvalidateDCacheAll(void) { + L1C_CleanInvalidateCache(0); +} + +/** \brief Clean the whole data cache. + */ +__STATIC_FORCEINLINE void L1C_CleanDCacheAll(void) { + L1C_CleanInvalidateCache(1); +} + +/** \brief Clean and invalidate the whole data cache. + */ +__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheAll(void) { + L1C_CleanInvalidateCache(2); +} + +/* ########################## L2 Cache functions ################################# */ +#if (__L2C_PRESENT == 1U) || defined(DOXYGEN) + /** \brief Cache Sync operation by writing CACHE_SYNC register. + */ + __STATIC_INLINE void L2C_Sync(void) + { + L2C_310->CACHE_SYNC = 0x0; + } + + /** \brief Read cache controller cache ID from CACHE_ID register. + * \return L2C_310_TypeDef::CACHE_ID + */ + __STATIC_INLINE int L2C_GetID (void) + { + return L2C_310->CACHE_ID; + } + + /** \brief Read cache controller cache type from CACHE_TYPE register. 
+ * \return L2C_310_TypeDef::CACHE_TYPE + */ + __STATIC_INLINE int L2C_GetType (void) + { + return L2C_310->CACHE_TYPE; + } + + /** \brief Invalidate all cache by way + */ + __STATIC_INLINE void L2C_InvAllByWay (void) + { + unsigned int assoc; + + if (L2C_310->AUX_CNT & (1U << 16U)) { + assoc = 16U; + } else { + assoc = 8U; + } + + L2C_310->INV_WAY = (1U << assoc) - 1U; + while(L2C_310->INV_WAY & ((1U << assoc) - 1U)); //poll invalidate + + L2C_Sync(); + } + + /** \brief Clean and Invalidate all cache by way + */ + __STATIC_INLINE void L2C_CleanInvAllByWay (void) + { + unsigned int assoc; + + if (L2C_310->AUX_CNT & (1U << 16U)) { + assoc = 16U; + } else { + assoc = 8U; + } + + L2C_310->CLEAN_INV_WAY = (1U << assoc) - 1U; + while(L2C_310->CLEAN_INV_WAY & ((1U << assoc) - 1U)); //poll invalidate + + L2C_Sync(); + } + + /** \brief Enable Level 2 Cache + */ + __STATIC_INLINE void L2C_Enable(void) + { + L2C_310->CONTROL = 0; + L2C_310->INTERRUPT_CLEAR = 0x000001FFuL; + L2C_310->DEBUG_CONTROL = 0; + L2C_310->DATA_LOCK_0_WAY = 0; + L2C_310->CACHE_SYNC = 0; + L2C_310->CONTROL = 0x01; + L2C_Sync(); + } + + /** \brief Disable Level 2 Cache + */ + __STATIC_INLINE void L2C_Disable(void) + { + L2C_310->CONTROL = 0x00; + L2C_Sync(); + } + + /** \brief Invalidate cache by physical address + * \param [in] pa Pointer to data to invalidate cache for. + */ + __STATIC_INLINE void L2C_InvPa (void *pa) + { + L2C_310->INV_LINE_PA = (unsigned int)pa; + L2C_Sync(); + } + + /** \brief Clean cache by physical address + * \param [in] pa Pointer to data to invalidate cache for. + */ + __STATIC_INLINE void L2C_CleanPa (void *pa) + { + L2C_310->CLEAN_LINE_PA = (unsigned int)pa; + L2C_Sync(); + } + + /** \brief Clean and invalidate cache by physical address + * \param [in] pa Pointer to data to invalidate cache for. + */ + __STATIC_INLINE void L2C_CleanInvPa (void *pa) + { + L2C_310->CLEAN_INV_LINE_PA = (unsigned int)pa; + L2C_Sync(); + } +#endif /* #if (__L2C_PRESENT == 1U) || defined(DOXYGEN) */ + +/* ########################## Generic Timer functions ############################ */ +#if (__TIM_PRESENT == 1U) || defined(DOXYGEN) + +/* PL1 Physical Timer */ +#if (__CORTEX_A == 7U) || defined(DOXYGEN) + /** \brief Physical Timer Control register */ + typedef union + { + struct + { + uint32_t ENABLE:1; /*!< \brief bit: 0 Enables the timer. */ + uint32_t IMASK:1; /*!< \brief bit: 1 Timer output signal mask bit. */ + uint32_t ISTATUS:1; /*!< \brief bit: 2 The status of the timer. */ + RESERVED(0:29, uint32_t) + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ + } CNTP_CTL_Type; + + /** \brief Configures the frequency the timer shall run at. + * \param [in] value The timer frequency in Hz. + */ + __STATIC_INLINE void PL1_SetCounterFrequency(uint32_t value) + { + __set_CNTFRQ(value); + __ISB(); + } + + /** \brief Sets the reset value of the timer. + * \param [in] value The value the timer is loaded with. + */ + __STATIC_INLINE void PL1_SetLoadValue(uint32_t value) + { + __set_CNTP_TVAL(value); + __ISB(); + } + + /** \brief Get the current counter value. + * \return Current counter value. + */ + __STATIC_INLINE uint32_t PL1_GetCurrentValue(void) + { + return(__get_CNTP_TVAL()); + } + + /** \brief Get the current physical counter value. + * \return Current physical counter value. + */ + __STATIC_INLINE uint64_t PL1_GetCurrentPhysicalValue(void) + { + return(__get_CNTPCT()); + } + + /** \brief Set the physical compare value. 
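+   * \details Illustrative one-shot deadline setup; "ticks" is a hypothetical delay in counter
+   *          ticks and PL1_SetCounterFrequency() is assumed to have been called during init:
+   * \code
+   *   PL1_SetPhysicalCompareValue(PL1_GetCurrentPhysicalValue() + ticks);
+   *   PL1_SetControl(1U);   // ENABLE = 1, IMASK = 0
+   * \endcode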
+ * \param [in] value New physical timer compare value. + */ + __STATIC_INLINE void PL1_SetPhysicalCompareValue(uint64_t value) + { + __set_CNTP_CVAL(value); + __ISB(); + } + + /** \brief Get the physical compare value. + * \return Physical compare value. + */ + __STATIC_INLINE uint64_t PL1_GetPhysicalCompareValue(void) + { + return(__get_CNTP_CVAL()); + } + + /** \brief Configure the timer by setting the control value. + * \param [in] value New timer control value. + */ + __STATIC_INLINE void PL1_SetControl(uint32_t value) + { + __set_CNTP_CTL(value); + __ISB(); + } + + /** \brief Get the control value. + * \return Control value. + */ + __STATIC_INLINE uint32_t PL1_GetControl(void) + { + return(__get_CNTP_CTL()); + } +#endif /* (__CORTEX_A == 7U) || defined(DOXYGEN) */ + +/* Private Timer */ +#if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) + /** \brief Set the load value to timers LOAD register. + * \param [in] value The load value to be set. + */ + __STATIC_INLINE void PTIM_SetLoadValue(uint32_t value) + { + PTIM->LOAD = value; + } + + /** \brief Get the load value from timers LOAD register. + * \return Timer_Type::LOAD + */ + __STATIC_INLINE uint32_t PTIM_GetLoadValue(void) + { + return(PTIM->LOAD); + } + + /** \brief Set current counter value from its COUNTER register. + */ + __STATIC_INLINE void PTIM_SetCurrentValue(uint32_t value) + { + PTIM->COUNTER = value; + } + + /** \brief Get current counter value from timers COUNTER register. + * \result Timer_Type::COUNTER + */ + __STATIC_INLINE uint32_t PTIM_GetCurrentValue(void) + { + return(PTIM->COUNTER); + } + + /** \brief Configure the timer using its CONTROL register. + * \param [in] value The new configuration value to be set. + */ + __STATIC_INLINE void PTIM_SetControl(uint32_t value) + { + PTIM->CONTROL = value; + } + + /** ref Timer_Type::CONTROL Get the current timer configuration from its CONTROL register. + * \return Timer_Type::CONTROL + */ + __STATIC_INLINE uint32_t PTIM_GetControl(void) + { + return(PTIM->CONTROL); + } + + /** ref Timer_Type::CONTROL Get the event flag in timers ISR register. + * \return 0 - flag is not set, 1- flag is set + */ + __STATIC_INLINE uint32_t PTIM_GetEventFlag(void) + { + return (PTIM->ISR & 1UL); + } + + /** ref Timer_Type::CONTROL Clears the event flag in timers ISR register. 
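+   * \details Illustrative use in a timer interrupt handler (a sketch; assumes the private
+   *          timer interrupt is routed through the GIC):
+   * \code
+   *   if (PTIM_GetEventFlag() != 0U) {
+   *     PTIM_ClearEventFlag();   // write 1 to the event flag to acknowledge it
+   *     // periodic work goes here
+   *   }
+   * \endcode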
+ */ + __STATIC_INLINE void PTIM_ClearEventFlag(void) + { + PTIM->ISR = 1; + } +#endif /* #if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) */ +#endif /* (__TIM_PRESENT == 1U) || defined(DOXYGEN) */ + +/* ########################## MMU functions ###################################### */ + +#define SECTION_DESCRIPTOR (0x2) +#define SECTION_MASK (0xFFFFFFFC) + +#define SECTION_TEXCB_MASK (0xFFFF8FF3) +#define SECTION_B_SHIFT (2) +#define SECTION_C_SHIFT (3) +#define SECTION_TEX0_SHIFT (12) +#define SECTION_TEX1_SHIFT (13) +#define SECTION_TEX2_SHIFT (14) + +#define SECTION_XN_MASK (0xFFFFFFEF) +#define SECTION_XN_SHIFT (4) + +#define SECTION_DOMAIN_MASK (0xFFFFFE1F) +#define SECTION_DOMAIN_SHIFT (5) + +#define SECTION_P_MASK (0xFFFFFDFF) +#define SECTION_P_SHIFT (9) + +#define SECTION_AP_MASK (0xFFFF73FF) +#define SECTION_AP_SHIFT (10) +#define SECTION_AP2_SHIFT (15) + +#define SECTION_S_MASK (0xFFFEFFFF) +#define SECTION_S_SHIFT (16) + +#define SECTION_NG_MASK (0xFFFDFFFF) +#define SECTION_NG_SHIFT (17) + +#define SECTION_NS_MASK (0xFFF7FFFF) +#define SECTION_NS_SHIFT (19) + +#define PAGE_L1_DESCRIPTOR (0x1) +#define PAGE_L1_MASK (0xFFFFFFFC) + +#define PAGE_L2_4K_DESC (0x2) +#define PAGE_L2_4K_MASK (0xFFFFFFFD) + +#define PAGE_L2_64K_DESC (0x1) +#define PAGE_L2_64K_MASK (0xFFFFFFFC) + +#define PAGE_4K_TEXCB_MASK (0xFFFFFE33) +#define PAGE_4K_B_SHIFT (2) +#define PAGE_4K_C_SHIFT (3) +#define PAGE_4K_TEX0_SHIFT (6) +#define PAGE_4K_TEX1_SHIFT (7) +#define PAGE_4K_TEX2_SHIFT (8) + +#define PAGE_64K_TEXCB_MASK (0xFFFF8FF3) +#define PAGE_64K_B_SHIFT (2) +#define PAGE_64K_C_SHIFT (3) +#define PAGE_64K_TEX0_SHIFT (12) +#define PAGE_64K_TEX1_SHIFT (13) +#define PAGE_64K_TEX2_SHIFT (14) + +#define PAGE_TEXCB_MASK (0xFFFF8FF3) +#define PAGE_B_SHIFT (2) +#define PAGE_C_SHIFT (3) +#define PAGE_TEX_SHIFT (12) + +#define PAGE_XN_4K_MASK (0xFFFFFFFE) +#define PAGE_XN_4K_SHIFT (0) +#define PAGE_XN_64K_MASK (0xFFFF7FFF) +#define PAGE_XN_64K_SHIFT (15) + +#define PAGE_DOMAIN_MASK (0xFFFFFE1F) +#define PAGE_DOMAIN_SHIFT (5) + +#define PAGE_P_MASK (0xFFFFFDFF) +#define PAGE_P_SHIFT (9) + +#define PAGE_AP_MASK (0xFFFFFDCF) +#define PAGE_AP_SHIFT (4) +#define PAGE_AP2_SHIFT (9) + +#define PAGE_S_MASK (0xFFFFFBFF) +#define PAGE_S_SHIFT (10) + +#define PAGE_NG_MASK (0xFFFFF7FF) +#define PAGE_NG_SHIFT (11) + +#define PAGE_NS_MASK (0xFFFFFFF7) +#define PAGE_NS_SHIFT (3) + +#define OFFSET_1M (0x00100000) +#define OFFSET_64K (0x00010000) +#define OFFSET_4K (0x00001000) + +#define DESCRIPTOR_FAULT (0x00000000) + +/* Attributes enumerations */ + +/* Region size attributes */ +typedef enum +{ + SECTION, + PAGE_4k, + PAGE_64k, +} mmu_region_size_Type; + +/* Region type attributes */ +typedef enum +{ + NORMAL, + DEVICE, + SHARED_DEVICE, + NON_SHARED_DEVICE, + STRONGLY_ORDERED +} mmu_memory_Type; + +/* Region cacheability attributes */ +typedef enum +{ + NON_CACHEABLE, + WB_WA, + WT, + WB_NO_WA, +} mmu_cacheability_Type; + +/* Region parity check attributes */ +typedef enum +{ + ECC_DISABLED, + ECC_ENABLED, +} mmu_ecc_check_Type; + +/* Region execution attributes */ +typedef enum +{ + EXECUTE, + NON_EXECUTE, +} mmu_execute_Type; + +/* Region global attributes */ +typedef enum +{ + GLOBAL, + NON_GLOBAL, +} mmu_global_Type; + +/* Region shareability attributes */ +typedef enum +{ + NON_SHARED, + SHARED, +} mmu_shared_Type; + +/* Region security attributes */ +typedef enum +{ + SECURE, + NON_SECURE, +} mmu_secure_Type; + +/* Region access attributes */ +typedef enum +{ + NO_ACCESS, + RW, + READ, +} 
mmu_access_Type; + +/* Memory Region definition */ +typedef struct RegionStruct { + mmu_region_size_Type rg_t; + mmu_memory_Type mem_t; + uint8_t domain; + mmu_cacheability_Type inner_norm_t; + mmu_cacheability_Type outer_norm_t; + mmu_ecc_check_Type e_t; + mmu_execute_Type xn_t; + mmu_global_Type g_t; + mmu_secure_Type sec_t; + mmu_access_Type priv_t; + mmu_access_Type user_t; + mmu_shared_Type sh_t; + +} mmu_region_attributes_Type; + +//Following macros define the descriptors and attributes +//Sect_Normal. Outer & inner wb/wa, non-shareable, executable, rw, domain 0 +#define section_normal(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_NC. Outer & inner non-cacheable, non-shareable, executable, rw, domain 0 +#define section_normal_nc(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0 +#define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = READ; \ + region.user_t = READ; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_RO. Sect_Normal_Cod, but not executable +#define section_normal_ro(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = READ; \ + region.user_t = READ; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_RW. Sect_Normal_Cod, but writeable and not executable +#define section_normal_rw(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); +//Sect_SO. 
Strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0 +#define section_so(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = STRONGLY_ORDERED; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Device_RO. Device, non-shareable, non-executable, ro, domain 0, base addr 0 +#define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = STRONGLY_ORDERED; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = READ; \ + region.user_t = READ; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Device_RW. Sect_Device_RO, but writeable +#define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = STRONGLY_ORDERED; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); +//Page_4k_Device_RW. Shared device, not executable, rw, domain 0 +#define page4k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = SHARED_DEVICE; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region); + +//Page_64k_Device_RW. Shared device, not executable, rw, domain 0 +#define page64k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_64k; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = SHARED_DEVICE; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region); + +/** \brief Set section execution-never attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] xn Section execution-never attribute : EXECUTE , NON_EXECUTE. + + \return 0 +*/ +__STATIC_INLINE int MMU_XNSection(uint32_t *descriptor_l1, mmu_execute_Type xn) +{ + *descriptor_l1 &= SECTION_XN_MASK; + *descriptor_l1 |= ((xn & 0x1) << SECTION_XN_SHIFT); + return 0; +} + +/** \brief Set section domain + + \param [out] descriptor_l1 L1 descriptor. + \param [in] domain Section domain + + \return 0 +*/ +__STATIC_INLINE int MMU_DomainSection(uint32_t *descriptor_l1, uint8_t domain) +{ + *descriptor_l1 &= SECTION_DOMAIN_MASK; + *descriptor_l1 |= ((domain & 0xF) << SECTION_DOMAIN_SHIFT); + return 0; +} + +/** \brief Set section parity check + + \param [out] descriptor_l1 L1 descriptor. 
+ \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED + + \return 0 +*/ +__STATIC_INLINE int MMU_PSection(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit) +{ + *descriptor_l1 &= SECTION_P_MASK; + *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT); + return 0; +} + +/** \brief Set section access privileges + + \param [out] descriptor_l1 L1 descriptor. + \param [in] user User Level Access: NO_ACCESS, RW, READ + \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ + \param [in] afe Access flag enable + + \return 0 +*/ +__STATIC_INLINE int MMU_APSection(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe) +{ + uint32_t ap = 0; + + if (afe == 0) { //full access + if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; } + else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == READ)) { ap = 0x2; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x7; } + } + + else { //Simplified access + if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x7; } + } + + *descriptor_l1 &= SECTION_AP_MASK; + *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT; + *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT; + + return 0; +} + +/** \brief Set section shareability + + \param [out] descriptor_l1 L1 descriptor. + \param [in] s_bit Section shareability: NON_SHARED, SHARED + + \return 0 +*/ +__STATIC_INLINE int MMU_SharedSection(uint32_t *descriptor_l1, mmu_shared_Type s_bit) +{ + *descriptor_l1 &= SECTION_S_MASK; + *descriptor_l1 |= ((s_bit & 0x1) << SECTION_S_SHIFT); + return 0; +} + +/** \brief Set section Global attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] g_bit Section attribute: GLOBAL, NON_GLOBAL + + \return 0 +*/ +__STATIC_INLINE int MMU_GlobalSection(uint32_t *descriptor_l1, mmu_global_Type g_bit) +{ + *descriptor_l1 &= SECTION_NG_MASK; + *descriptor_l1 |= ((g_bit & 0x1) << SECTION_NG_SHIFT); + return 0; +} + +/** \brief Set section Security attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] s_bit Section Security attribute: SECURE, NON_SECURE + + \return 0 +*/ +__STATIC_INLINE int MMU_SecureSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit) +{ + *descriptor_l1 &= SECTION_NS_MASK; + *descriptor_l1 |= ((s_bit & 0x1) << SECTION_NS_SHIFT); + return 0; +} + +/* Page 4k or 64k */ +/** \brief Set 4k/64k page execution-never attribute + + \param [out] descriptor_l2 L2 descriptor. + \param [in] xn Page execution-never attribute : EXECUTE , NON_EXECUTE. + \param [in] page Page size: PAGE_4k, PAGE_64k, + + \return 0 +*/ +__STATIC_INLINE int MMU_XNPage(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page) +{ + if (page == PAGE_4k) + { + *descriptor_l2 &= PAGE_XN_4K_MASK; + *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_4K_SHIFT); + } + else + { + *descriptor_l2 &= PAGE_XN_64K_MASK; + *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_64K_SHIFT); + } + return 0; +} + +/** \brief Set 4k/64k page domain + + \param [out] descriptor_l1 L1 descriptor. 
+ \param [in] domain Page domain + + \return 0 +*/ +__STATIC_INLINE int MMU_DomainPage(uint32_t *descriptor_l1, uint8_t domain) +{ + *descriptor_l1 &= PAGE_DOMAIN_MASK; + *descriptor_l1 |= ((domain & 0xf) << PAGE_DOMAIN_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page parity check + + \param [out] descriptor_l1 L1 descriptor. + \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED + + \return 0 +*/ +__STATIC_INLINE int MMU_PPage(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit) +{ + *descriptor_l1 &= SECTION_P_MASK; + *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page access privileges + + \param [out] descriptor_l2 L2 descriptor. + \param [in] user User Level Access: NO_ACCESS, RW, READ + \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ + \param [in] afe Access flag enable + + \return 0 +*/ +__STATIC_INLINE int MMU_APPage(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe) +{ + uint32_t ap = 0; + + if (afe == 0) { //full access + if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; } + else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == READ)) { ap = 0x2; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x6; } + } + + else { //Simplified access + if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x7; } + } + + *descriptor_l2 &= PAGE_AP_MASK; + *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT; + *descriptor_l2 |= ((ap & 0x4)>>2) << PAGE_AP2_SHIFT; + + return 0; +} + +/** \brief Set 4k/64k page shareability + + \param [out] descriptor_l2 L2 descriptor. + \param [in] s_bit 4k/64k page shareability: NON_SHARED, SHARED + + \return 0 +*/ +__STATIC_INLINE int MMU_SharedPage(uint32_t *descriptor_l2, mmu_shared_Type s_bit) +{ + *descriptor_l2 &= PAGE_S_MASK; + *descriptor_l2 |= ((s_bit & 0x1) << PAGE_S_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page Global attribute + + \param [out] descriptor_l2 L2 descriptor. + \param [in] g_bit 4k/64k page attribute: GLOBAL, NON_GLOBAL + + \return 0 +*/ +__STATIC_INLINE int MMU_GlobalPage(uint32_t *descriptor_l2, mmu_global_Type g_bit) +{ + *descriptor_l2 &= PAGE_NG_MASK; + *descriptor_l2 |= ((g_bit & 0x1) << PAGE_NG_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page Security attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] s_bit 4k/64k page Security attribute: SECURE, NON_SECURE + + \return 0 +*/ +__STATIC_INLINE int MMU_SecurePage(uint32_t *descriptor_l1, mmu_secure_Type s_bit) +{ + *descriptor_l1 &= PAGE_NS_MASK; + *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT); + return 0; +} + +/** \brief Set Section memory attributes + + \param [out] descriptor_l1 L1 descriptor. 
+ \param [in] mem Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED + \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA, + \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA, + + \return 0 +*/ +__STATIC_INLINE int MMU_MemorySection(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner) +{ + *descriptor_l1 &= SECTION_TEXCB_MASK; + + if (STRONGLY_ORDERED == mem) + { + return 0; + } + else if (SHARED_DEVICE == mem) + { + *descriptor_l1 |= (1 << SECTION_B_SHIFT); + } + else if (NON_SHARED_DEVICE == mem) + { + *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT); + } + else if (NORMAL == mem) + { + *descriptor_l1 |= 1 << SECTION_TEX2_SHIFT; + switch(inner) + { + case NON_CACHEABLE: + break; + case WB_WA: + *descriptor_l1 |= (1 << SECTION_B_SHIFT); + break; + case WT: + *descriptor_l1 |= 1 << SECTION_C_SHIFT; + break; + case WB_NO_WA: + *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT); + break; + } + switch(outer) + { + case NON_CACHEABLE: + break; + case WB_WA: + *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT); + break; + case WT: + *descriptor_l1 |= 1 << SECTION_TEX1_SHIFT; + break; + case WB_NO_WA: + *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX0_SHIFT); + break; + } + } + return 0; +} + +/** \brief Set 4k/64k page memory attributes + + \param [out] descriptor_l2 L2 descriptor. + \param [in] mem 4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED + \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA, + \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA, + \param [in] page Page size + + \return 0 +*/ +__STATIC_INLINE int MMU_MemoryPage(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page) +{ + *descriptor_l2 &= PAGE_4K_TEXCB_MASK; + + if (page == PAGE_64k) + { + //same as section + MMU_MemorySection(descriptor_l2, mem, outer, inner); + } + else + { + if (STRONGLY_ORDERED == mem) + { + return 0; + } + else if (SHARED_DEVICE == mem) + { + *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT); + } + else if (NON_SHARED_DEVICE == mem) + { + *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT); + } + else if (NORMAL == mem) + { + *descriptor_l2 |= 1 << PAGE_4K_TEX2_SHIFT; + switch(inner) + { + case NON_CACHEABLE: + break; + case WB_WA: + *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT); + break; + case WT: + *descriptor_l2 |= 1 << PAGE_4K_C_SHIFT; + break; + case WB_NO_WA: + *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT); + break; + } + switch(outer) + { + case NON_CACHEABLE: + break; + case WB_WA: + *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT); + break; + case WT: + *descriptor_l2 |= 1 << PAGE_4K_TEX1_SHIFT; + break; + case WB_NO_WA: + *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX0_SHIFT); + break; + } + } + } + + return 0; +} + +/** \brief Create a L1 section descriptor + + \param [out] descriptor L1 descriptor + \param [in] reg Section attributes + + \return 0 +*/ +__STATIC_INLINE int MMU_GetSectionDescriptor(uint32_t *descriptor, mmu_region_attributes_Type reg) +{ + *descriptor = 0; + + MMU_MemorySection(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t); + MMU_XNSection(descriptor,reg.xn_t); + MMU_DomainSection(descriptor, reg.domain); + MMU_PSection(descriptor, reg.e_t); + MMU_APSection(descriptor, reg.user_t, reg.priv_t, 1); + MMU_SharedSection(descriptor,reg.sh_t); + 
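+  /* At this point the level-1 descriptor already carries the memory type
+     (TEX/C/B), execute-never, domain, parity and access-permission attributes
+     (AP bits, with AFE = 1 selecting the simplified access model) as well as
+     the shareability bit. The two calls below add the not-global (nG) and
+     non-secure (NS) attributes before the section type bits are merged in.
+     Application code normally reaches this function through the section_*()
+     convenience macros defined above (e.g. section_normal()) rather than by
+     calling it directly. */
+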
MMU_GlobalSection(descriptor,reg.g_t); + MMU_SecureSection(descriptor,reg.sec_t); + *descriptor &= SECTION_MASK; + *descriptor |= SECTION_DESCRIPTOR; + + return 0; +} + + +/** \brief Create a L1 and L2 4k/64k page descriptor + + \param [out] descriptor L1 descriptor + \param [out] descriptor2 L2 descriptor + \param [in] reg 4k/64k page attributes + + \return 0 +*/ +__STATIC_INLINE int MMU_GetPageDescriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg) +{ + *descriptor = 0; + *descriptor2 = 0; + + switch (reg.rg_t) + { + case PAGE_4k: + MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k); + MMU_XNPage(descriptor2, reg.xn_t, PAGE_4k); + MMU_DomainPage(descriptor, reg.domain); + MMU_PPage(descriptor, reg.e_t); + MMU_APPage(descriptor2, reg.user_t, reg.priv_t, 1); + MMU_SharedPage(descriptor2,reg.sh_t); + MMU_GlobalPage(descriptor2,reg.g_t); + MMU_SecurePage(descriptor,reg.sec_t); + *descriptor &= PAGE_L1_MASK; + *descriptor |= PAGE_L1_DESCRIPTOR; + *descriptor2 &= PAGE_L2_4K_MASK; + *descriptor2 |= PAGE_L2_4K_DESC; + break; + + case PAGE_64k: + MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k); + MMU_XNPage(descriptor2, reg.xn_t, PAGE_64k); + MMU_DomainPage(descriptor, reg.domain); + MMU_PPage(descriptor, reg.e_t); + MMU_APPage(descriptor2, reg.user_t, reg.priv_t, 1); + MMU_SharedPage(descriptor2,reg.sh_t); + MMU_GlobalPage(descriptor2,reg.g_t); + MMU_SecurePage(descriptor,reg.sec_t); + *descriptor &= PAGE_L1_MASK; + *descriptor |= PAGE_L1_DESCRIPTOR; + *descriptor2 &= PAGE_L2_64K_MASK; + *descriptor2 |= PAGE_L2_64K_DESC; + break; + + case SECTION: + //error + break; + } + + return 0; +} + +/** \brief Create a 1MB Section + + \param [in] ttb Translation table base address + \param [in] base_address Section base address + \param [in] count Number of sections to create + \param [in] descriptor_l1 L1 descriptor (region attributes) + +*/ +__STATIC_INLINE void MMU_TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1) +{ + uint32_t offset; + uint32_t entry; + uint32_t i; + + offset = base_address >> 20; + entry = (base_address & 0xFFF00000) | descriptor_l1; + + //4 bytes aligned + ttb = ttb + offset; + + for (i = 0; i < count; i++ ) + { + //4 bytes aligned + *ttb++ = entry; + entry += OFFSET_1M; + } +} + +/** \brief Create a 4k page entry + + \param [in] ttb L1 table base address + \param [in] base_address 4k base address + \param [in] count Number of 4k pages to create + \param [in] descriptor_l1 L1 descriptor (region attributes) + \param [in] ttb_l2 L2 table base address + \param [in] descriptor_l2 L2 descriptor (region attributes) + +*/ +__STATIC_INLINE void MMU_TTPage4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 ) +{ + + uint32_t offset, offset2; + uint32_t entry, entry2; + uint32_t i; + + offset = base_address >> 20; + entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1; + + //4 bytes aligned + ttb += offset; + //create l1_entry + *ttb = entry; + + offset2 = (base_address & 0xff000) >> 12; + ttb_l2 += offset2; + entry2 = (base_address & 0xFFFFF000) | descriptor_l2; + for (i = 0; i < count; i++ ) + { + //4 bytes aligned + *ttb_l2++ = entry2; + entry2 += OFFSET_4K; + } +} + +/** \brief Create a 64k page entry + + \param [in] ttb L1 table base address + \param [in] base_address 64k base address + \param [in] count Number of 64k pages to create + \param [in] descriptor_l1 L1 descriptor (region 
attributes) + \param [in] ttb_l2 L2 table base address + \param [in] descriptor_l2 L2 descriptor (region attributes) + +*/ +__STATIC_INLINE void MMU_TTPage64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 ) +{ + uint32_t offset, offset2; + uint32_t entry, entry2; + uint32_t i,j; + + + offset = base_address >> 20; + entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1; + + //4 bytes aligned + ttb += offset; + //create l1_entry + *ttb = entry; + + offset2 = (base_address & 0xff000) >> 12; + ttb_l2 += offset2; + entry2 = (base_address & 0xFFFF0000) | descriptor_l2; + for (i = 0; i < count; i++ ) + { + //create 16 entries + for (j = 0; j < 16; j++) + { + //4 bytes aligned + *ttb_l2++ = entry2; + } + entry2 += OFFSET_64K; + } +} + +/** \brief Enable MMU +*/ +__STATIC_INLINE void MMU_Enable(void) +{ + // Set M bit 0 to enable the MMU + // Set AFE bit to enable simplified access permissions model + // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking + __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29)); + __ISB(); +} + +/** \brief Disable MMU +*/ +__STATIC_INLINE void MMU_Disable(void) +{ + // Clear M bit 0 to disable the MMU + __set_SCTLR( __get_SCTLR() & ~1); + __ISB(); +} + +/** \brief Invalidate entire unified TLB +*/ +__STATIC_INLINE void MMU_InvalidateTLB(void) +{ + __set_TLBIALL(0); + __DSB(); //ensure completion of the invalidation + __ISB(); //ensure instruction fetch path sees new state +} + + + +/** \brief Enable Floating Point Unit */ +__STATIC_INLINE void __FPU_Enable(void) +{ + // Permit access to VFP/NEON, registers by modifying CPACR + const uint32_t cpacr = __get_CPACR(); + __set_CPACR(cpacr | 0x00F00000ul); + __ISB(); + + // Enable VFP/NEON + const uint32_t fpexc = __get_FPEXC(); + __set_FPEXC(fpexc | 0x40000000ul); + + __ASM volatile( + // Initialise VFP/NEON registers to 0 + " MOV R2,#0 \n" + + // Initialise D16 registers to 0 + " VMOV D0, R2,R2 \n" + " VMOV D1, R2,R2 \n" + " VMOV D2, R2,R2 \n" + " VMOV D3, R2,R2 \n" + " VMOV D4, R2,R2 \n" + " VMOV D5, R2,R2 \n" + " VMOV D6, R2,R2 \n" + " VMOV D7, R2,R2 \n" + " VMOV D8, R2,R2 \n" + " VMOV D9, R2,R2 \n" + " VMOV D10,R2,R2 \n" + " VMOV D11,R2,R2 \n" + " VMOV D12,R2,R2 \n" + " VMOV D13,R2,R2 \n" + " VMOV D14,R2,R2 \n" + " VMOV D15,R2,R2 \n" + +#if (defined(__ARM_NEON) && (__ARM_NEON == 1)) + // Initialise D32 registers to 0 + " VMOV D16,R2,R2 \n" + " VMOV D17,R2,R2 \n" + " VMOV D18,R2,R2 \n" + " VMOV D19,R2,R2 \n" + " VMOV D20,R2,R2 \n" + " VMOV D21,R2,R2 \n" + " VMOV D22,R2,R2 \n" + " VMOV D23,R2,R2 \n" + " VMOV D24,R2,R2 \n" + " VMOV D25,R2,R2 \n" + " VMOV D26,R2,R2 \n" + " VMOV D27,R2,R2 \n" + " VMOV D28,R2,R2 \n" + " VMOV D29,R2,R2 \n" + " VMOV D30,R2,R2 \n" + " VMOV D31,R2,R2 \n" +#endif + : : : "cc", "r2" + ); + + // Initialise FPSCR to a known state + const uint32_t fpscr = __get_FPSCR(); + __set_FPSCR(fpscr & 0x00086060ul); +} + + +#ifdef __cplusplus +} +#endif + +#endif /* __ARM_V7A_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/CMSIS/Core/Include/a-profile/cmsis_cp15.h b/CMSIS/Core/Include/a-profile/armv7a_cp15.h similarity index 99% rename from CMSIS/Core/Include/a-profile/cmsis_cp15.h rename to CMSIS/Core/Include/a-profile/armv7a_cp15.h index 9d5300112..201bb9685 100644 --- a/CMSIS/Core/Include/a-profile/cmsis_cp15.h +++ b/CMSIS/Core/Include/a-profile/armv7a_cp15.h @@ -1,11 +1,11 @@ /**************************************************************************//** - * @file 
cmsis_cp15.h + * @file armv7a_cp15.h * @brief CMSIS compiler specific macros, functions, instructions * @version V1.0.2 * @date 19. December 2022 ******************************************************************************/ /* - * Copyright (c) 2009-2017 ARM Limited. All rights reserved. + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/CMSIS/Core/Include/a-profile/armv8a.h b/CMSIS/Core/Include/a-profile/armv8a.h new file mode 100644 index 000000000..0bee2a902 --- /dev/null +++ b/CMSIS/Core/Include/a-profile/armv8a.h @@ -0,0 +1,647 @@ +/**************************************************************************//** + * @file armv8a.h + * @brief CMSIS Cortex-A Core Peripheral Access Layer Header File for ARMv8-A + * @version V1.0.0 + * @date 2. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __ARM_V8A_GENERIC +#define __ARM_V8A_GENERIC + +#ifdef __cplusplus + extern "C" { +#endif + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup ARMv8-A + @{ + */ + +#include "cmsis_version.h" + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. 
+*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TMS470__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__ARM_FP) && (__ARM_FP==0xE) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU 
(check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ +#include "armv8a_system_control.h" + +#ifdef __cplusplus +} +#endif + +#endif /* __ARM_V8A_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __ARM_V8A_DEPENDANT +#define __ARM_V8A_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + + /* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __GIC_PRESENT + #define __GIC_PRESENT 1U + #warning "__GIC_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __TIM_PRESENT + #define __TIM_PRESENT 1U + #warning "__TIM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __L2C_PRESENT + #define __L2C_PRESENT 0U + #warning "__L2C_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __L3C_PRESENT + #define __L3C_PRESENT 0U + #warning "__L3C_PRESENT not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< \brief Defines 'read only' permissions */ +#else + #define __I volatile const /*!< \brief Defines 'read only' permissions */ +#endif +#define __O volatile /*!< \brief Defines 'write only' permissions */ +#define __IO volatile /*!< \brief Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */ +#define __OM volatile /*!< \brief Defines 'write only' structure member permissions */ +#define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */ +#define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas + +/** @} end of group ARMv8-A */ + + + + /******************************************************************************* + * Register Abstraction + Core Register contain: + - CPSR + - CP15 Registers + - L2C-310 Cache Controller + - Generic Interrupt Controller Distributor + - Generic Interrupt Controller Interface + ******************************************************************************/ + +/* Core Register CPSR */ +typedef union +{ + struct + { + uint32_t M:4; /*!< \brief bit: 0.. 3 Mode field */ + RESERVED(0:2, uint32_t) /* bit: 4.. 
5 Reserved */ + uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */ + uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */ + uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */ + uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */ + RESERVED(1:6, uint32_t) /* bit: 10..15 Reserved */ + uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */ + RESERVED(2:1, uint32_t) /* bit: 20 Reserved */ + uint32_t DIT:1; /*!< \brief bit: 21 Data Independent Timing */ + uint32_t PAN:1; /*!< \brief bit: 22 Privileged Access Never */ + uint32_t SSBS:1; /*!< \brief bit: 23 Speculative Store Bypass Safe */ + RESERVED(3:3, uint32_t) /* bit: 24..26 Reserved */ + uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */ + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CPSR_Type; + + + +/* CPSR Register Definitions */ +#define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */ +#define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */ + +#define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */ +#define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */ + +#define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */ +#define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */ + +#define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */ +#define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */ + +#define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */ +#define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */ + +#define CPSR_SSBS_Pos 23U /*!< \brief CPSR: SSBS Position */ +#define CPSR_SSBS_Msk (0x1UL << CPSR_SSBS_Pos) /*!< \brief CPSR: SSBS Mask */ + +#define CPSR_PAN_Pos 22U /*!< \brief CPSR: PAN Position */ +#define CPSR_PAN_Msk (0x1UL << CPSR_PAN_Pos) /*!< \brief CPSR: PAN Mask */ + +#define CPSR_DIT_Pos 21U /*!< \brief CPSR: DIT Position */ +#define CPSR_DIT_Msk (0x1UL << CPSR_DIT_Pos) /*!< \brief CPSR: DIT Mask */ + +#define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */ +#define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */ + +#define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */ +#define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */ + +#define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */ +#define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */ + +#define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */ +#define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */ + +#define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */ +#define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */ + +#define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */ +#define CPSR_M_Msk (0xFUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */ + +#define CPSR_M_USR 0x10U /*!< \brief CPSR: M User mode (PL0) */ +#define CPSR_M_FIQ 0x11U /*!< \brief CPSR: M Fast Interrupt mode (PL1) */ +#define CPSR_M_IRQ 0x12U /*!< \brief CPSR: M Interrupt mode (PL1) */ +#define CPSR_M_SVC 0x13U /*!< \brief CPSR: M Supervisor mode (PL1) */ +#define CPSR_M_MON 0x16U /*!< \brief CPSR: M Monitor mode (PL1) */ +#define CPSR_M_ABT 0x17U /*!< \brief CPSR: M Abort mode (PL1) */ +#define CPSR_M_HYP 0x1AU /*!< \brief CPSR: M Hypervisor mode (PL2) */ +#define CPSR_M_UND 0x1BU /*!< \brief CPSR: M Undefined mode (PL1) */ +#define CPSR_M_SYS 
0x1FU /*!< \brief CPSR: M System mode (PL1) */ + +/* Register SCTLR */ +typedef union +{ + struct + { + uint64_t M:1; /*!< \brief bit: 0 MMU enable */ + uint64_t A:1; /*!< \brief bit: 1 Alignment check enable */ + uint64_t C:1; /*!< \brief bit: 2 Cache enable */ + uint64_t SA:1; /*!< \brief bit: 3 SP Alignment check enable */ + RESERVED(1:2, uint64_t) //[5:4] + uint64_t nAA:1; /*!< \brief bit: 6 Non-aligned access */ + RESERVED(2:4, uint64_t) //[10:7] + uint64_t EOS:1; /*!< \brief bit: 11 Exception Exit is Context Synchronizing */ + uint64_t I:1; /*!< \brief bit: 12 Instruction cache enable */ + uint64_t EnDB:1; //13 + RESERVED(3:2, uint64_t) //[15:14] + RESERVED(4:1, uint64_t) //[16] + RESERVED(5:1, uint64_t) //[17] + RESERVED(6:1, uint64_t) //[18] + uint64_t WXN:1; /*!< \brief bit: 19 Write permission implies XN */ + RESERVED(7:1, uint64_t) //[20] + uint64_t IESB:1; //21 + uint64_t EIS:1; //22 + RESERVED(8:1, uint64_t) //[23] + RESERVED(9:1, uint64_t) //[24] + uint64_t EE:1; /*!< \brief bit: 25 Exception Endianness */ + RESERVED(10:1, uint64_t) //[26] + uint64_t EnDA:1; //27 + RESERVED(11:2, uint64_t) //[29:28] + uint64_t EnIB:1; //30 + uint64_t EnIA:1; //31 + RESERVED(12:4, uint64_t) //[35:32] + uint64_t BT:1; //36 + uint64_t ITFSB:1; //37 + RESERVED(13:2, uint64_t) //[39:38] + uint64_t TCF:2; //[41:40] + RESERVED(14:1, uint64_t) //[42] + uint64_t ATA:1; //43 + uint64_t DSSBS:1; //44 + RESERVED(15:19, uint64_t) //[63:45] + } b; /*!< \brief Structure used for bit access */ + uint64_t w; /*!< \brief Type used for word access */ +} SCTLR_Type; + +#define SCTLR_TE_Pos 30U /*!< \brief SCTLR: TE Position */ +#define SCTLR_TE_Msk (1UL << SCTLR_TE_Pos) /*!< \brief SCTLR: TE Mask */ + +#define SCTLR_AFE_Pos 29U /*!< \brief SCTLR: AFE Position */ +#define SCTLR_AFE_Msk (1UL << SCTLR_AFE_Pos) /*!< \brief SCTLR: AFE Mask */ + +#define SCTLR_TRE_Pos 28U /*!< \brief SCTLR: TRE Position */ +#define SCTLR_TRE_Msk (1UL << SCTLR_TRE_Pos) /*!< \brief SCTLR: TRE Mask */ + +#define SCTLR_EE_Pos 25U /*!< \brief SCTLR: EE Position */ +#define SCTLR_EE_Msk (1UL << SCTLR_EE_Pos) /*!< \brief SCTLR: EE Mask */ + +#define SCTLR_UWXN_Pos 20U /*!< \brief SCTLR: UWXN Position */ +#define SCTLR_UWXN_Msk (1UL << SCTLR_UWXN_Pos) /*!< \brief SCTLR: UWXN Mask */ + +#define SCTLR_WXN_Pos 19U /*!< \brief SCTLR: WXN Position */ +#define SCTLR_WXN_Msk (1UL << SCTLR_WXN_Pos) /*!< \brief SCTLR: WXN Mask */ + +#define SCTLR_nTWE_Pos 18U /*!< \brief SCTLR: nTWE Position */ +#define SCTLR_nTWE_Msk (1UL << SCTLR_nTWE_Pos) /*!< \brief SCTLR: nTWE Mask */ + +#define SCTLR_nTWI_Pos 16U /*!< \brief SCTLR: nTWI Position */ +#define SCTLR_nTWI_Msk (1UL << SCTLR_nTWI_Pos) /*!< \brief SCTLR: nTWI Mask */ + +#define SCTLR_V_Pos 13U /*!< \brief SCTLR: V Position */ +#define SCTLR_V_Msk (1UL << SCTLR_V_Pos) /*!< \brief SCTLR: V Mask */ + +#define SCTLR_I_Pos 12U /*!< \brief SCTLR: I Position */ +#define SCTLR_I_Msk (1UL << SCTLR_I_Pos) /*!< \brief SCTLR: I Mask */ + +#define SCTLR_SED_Pos 8U /*!< \brief SCTLR: SED Position */ +#define SCTLR_SED_Msk (1UL << SCTLR_SED_Pos) /*!< \brief SCTLR: SED Mask */ + +#define SCTLR_ITD_Pos 7U /*!< \brief SCTLR: ITD Position */ +#define SCTLR_ITD_Msk (1UL << SCTLR_ITD_Pos) /*!< \brief SCTLR: ITD Mask */ + +#define SCTLR_THEE_Pos 6U /*!< \brief SCTLR: THEE Position */ +#define SCTLR_THEE_Msk (1UL << SCTLR_THEE_Pos) /*!< \brief SCTLR: THEE Mask */ + +#define SCTLR_CP15BEN_Pos 5U /*!< \brief SCTLR: CP15BEN Position */ +#define SCTLR_CP15BEN_Msk (1UL << SCTLR_CP15BEN_Pos) /*!< \brief SCTLR: CP15BEN Mask */ + 
+#define SCTLR_C_Pos 2U /*!< \brief SCTLR: C Position */ +#define SCTLR_C_Msk (1UL << SCTLR_C_Pos) /*!< \brief SCTLR: C Mask */ + +#define SCTLR_A_Pos 1U /*!< \brief SCTLR: A Position */ +#define SCTLR_A_Msk (1UL << SCTLR_A_Pos) /*!< \brief SCTLR: A Mask */ + +#define SCTLR_M_Pos 0U /*!< \brief SCTLR: M Position */ +#define SCTLR_M_Msk (1UL << SCTLR_M_Pos) /*!< \brief SCTLR: M Mask */ + + +/* Register TCR_EL3 */ +typedef union +{ + struct + { + uint64_t T0SZ:6; //[5:0] + RESERVED(1:2, uint64_t) //[7:6] + uint64_t IRGN0:2; //[9:8] + uint64_t ORGN0:2; //[11:10] + uint64_t SH0:2; //[13:12] + uint64_t TG0:2; //[15:14] + uint64_t PS:3; //[18:16] + RESERVED(2:1, uint64_t) //[19] + uint64_t TBI:1; //[20] + uint64_t HA:1; //[21] + uint64_t HD:1; //[22] + RESERVED(3:1, uint64_t) //[23] + uint64_t HPD:1; //[24] + uint64_t HWU59:1; //[25] + uint64_t HWU60:1; //[26] + uint64_t HWU61:1; //[27] + uint64_t HWU62:1; //[28] + uint64_t TBID:1; //[29] + uint64_t TCMA:1; //[30] + RESERVED(4:1, uint64_t) //[31] + RESERVED(5:32, uint64_t) //[63:32] + } b; + uint64_t w; /*!< \brief Type used for word access */ +} TCR_EL3_Type; + + +/* Register MPIDR_EL1 */ +typedef union +{ + struct + { + uint64_t Aff0:8; + uint64_t Aff1:8; + uint64_t Aff2:8; + uint64_t MT:1; + RESERVED(0:5, uint64_t) + uint64_t U:1; + RESERVED(1:1, uint64_t) + uint64_t Aff3:8; + RESERVED(2:24, uint64_t) + } b; /*!< \brief Structure used for bit access */ + uint64_t w; /*!< \brief Type used for word access */ +} MPIDR_EL1_Type; + + + /******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - L1 Cache Functions + - L2C-310 Cache Controller Functions + - PL1 Timer Functions + - GIC Functions + - MMU Functions + ******************************************************************************/ + +/* ########################## L1 Cache functions ################################# */ + +/** \brief Enable Caches by setting I and C bits in SCTLR register. +*/ +__STATIC_FORCEINLINE void L1C_EnableCaches(void) { + __set_SCTLR_EL3( __get_SCTLR_EL3() | SCTLR_I_Msk | SCTLR_C_Msk); + __ISB(); +} + +/** \brief Disable Caches by clearing I and C bits in SCTLR register. +*/ +__STATIC_FORCEINLINE void L1C_DisableCaches(void) { + __set_SCTLR_EL3( __get_SCTLR_EL3() & (~SCTLR_I_Msk) & (~SCTLR_C_Msk)); + __ISB(); +} + +/** \brief Enable Branch Prediction by setting Z bit in SCTLR register. +*/ +__STATIC_FORCEINLINE void L1C_EnableBTAC(void) { + +} + +/** \brief Disable Branch Prediction by clearing Z bit in SCTLR register. +*/ +__STATIC_FORCEINLINE void L1C_DisableBTAC(void) { + +} + +/** \brief Invalidate entire branch predictor array +*/ +__STATIC_FORCEINLINE void L1C_InvalidateBTAC(void) { + +} + +/** \brief Invalidate the whole instruction cache +*/ +__STATIC_FORCEINLINE void L1C_InvalidateICacheAll(void) { + +} + +/** \brief Invalidate the whole data cache. 
+*/ +__STATIC_FORCEINLINE void L1C_InvalidateDCacheAll(void) { + +} + + +/* ########################## L2 Cache functions ################################# */ +#if (__L2C_PRESENT == 1U) || defined(DOXYGEN) + +/** \brief Enable Level 2 Cache +*/ +__STATIC_INLINE void L2C_Enable(void) +{ + +} +#endif + +/* ########################## L3 Cache functions ################################# */ +#if (__L3C_PRESENT == 1U) || defined(DOXYGEN) + +#endif + +/* ########################## GIC functions ###################################### */ +#if (__GIC_PRESENT == 1U) || defined(DOXYGEN) + +#endif + +/* ########################## Generic Timer functions ############################ */ +#if (__TIM_PRESENT == 1U) || defined(DOXYGEN) + +#endif + +/* ########################## MMU functions ###################################### */ + +/** \brief Enable MMU +*/ +__STATIC_INLINE void MMU_Enable(void) +{ + __set_SCTLR_EL3( __get_SCTLR_EL3() | SCTLR_M_Msk); + __ISB(); +} + +/** \brief Disable MMU +*/ +__STATIC_INLINE void MMU_Disable(void) +{ + __set_SCTLR_EL3( __get_SCTLR_EL3() & (~SCTLR_M_Msk)); + __ISB(); +} + +/** \brief Invalidate entire unified TLB +*/ + +__STATIC_INLINE void MMU_InvalidateTLB(void) +{ + __DSB(); + __ASM volatile("tlbi vmalle1is"); + __DSB(); + __ISB(); +} + + + +/** \brief Enable Floating Point Unit */ +__STATIC_INLINE void __FPU_Enable(void) +{ + __ASM volatile( + // In AArch64, you do not need to enable access to the NEON and FP registers. + // However, access to the NEON and FP registers can still be trapped. + + // Disable trapping of accessing in EL3 and EL2. + " MSR CPTR_EL3, XZR \n" + " MSR CPTR_EL2, XZR \n" + + // Disable access trapping in EL1 and EL0. + " MOV X1, #(0x3 << 20) \n" + + // FPEN disables trapping to EL1. + " MSR CPACR_EL1, X1 \n" + + //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted + " ISB " + + : : : "cc", "x1" + ); +} + +#ifdef __cplusplus +} +#endif + +#endif /* __ARM_V8A_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/CMSIS/Core/Include/a-profile/armv8a_system_control.h b/CMSIS/Core/Include/a-profile/armv8a_system_control.h new file mode 100644 index 000000000..d03cafc40 --- /dev/null +++ b/CMSIS/Core/Include/a-profile/armv8a_system_control.h @@ -0,0 +1,143 @@ +/**************************************************************************//** + * @file armv8a_system_control.h + * @brief CMSIS compiler specific macros, functions, instructions + * @version V6.0.0 + * @date 4. August 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <stdint.h>
+
+ #if defined ( __ICCARM__ )
+ #pragma system_include /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+ #pragma clang system_header /* treat file as system include file */
+#endif
+
+#ifndef __CMSIS_SYSTEM_CONTROL_H
+#define __CMSIS_SYSTEM_CONTROL_H
+
+ /** \brief Get MPIDR EL1
+     \return Multiprocessor Affinity Register value
+  */
+ __STATIC_FORCEINLINE uint64_t __get_MPIDR_EL1(void)
+ {
+   uint64_t result;
+   __ASM volatile("MRS %0, MPIDR_EL1" : "=r" (result) : : "memory");
+   return result;
+ }
+
+ /** \brief Get MAIR EL3
+     \return MAIR value
+  */
+ __STATIC_FORCEINLINE uint64_t __get_MAIR_EL3(void)
+ {
+   uint64_t result;
+   __ASM volatile("MRS %0, mair_el3" : "=r" (result) : : "memory");
+   return result;
+ }
+
+ /** \brief Set MAIR EL3
+     \param [in] mair MAIR value to set
+  */
+ __STATIC_FORCEINLINE void __set_MAIR_EL3(uint64_t mair)
+ {
+   __ASM volatile("MSR mair_el3, %0" : : "r" (mair) : "memory");
+ }
+
+ /** \brief Get TCR EL3
+     \return TCR value
+  */
+ __STATIC_FORCEINLINE uint64_t __get_TCR_EL3(void)
+ {
+   uint64_t result;
+   __ASM volatile("MRS %0, tcr_el3" : "=r" (result) : : "memory");
+   return result;
+ }
+
+ /** \brief Set TCR EL3
+     \param [in] tcr TCR value to set
+  */
+ __STATIC_FORCEINLINE void __set_TCR_EL3(uint64_t tcr)
+ {
+   __ASM volatile("MSR tcr_el3, %0" : : "r" (tcr) : "memory");
+ }
+
+ /** \brief Get TTBR0 EL3
+     \return Translation Table Base Register 0 value
+  */
+ __STATIC_FORCEINLINE uint64_t __get_TTBR0_EL3(void)
+ {
+   uint64_t result;
+   __ASM volatile("MRS %0, ttbr0_el3" : "=r" (result) : : "memory");
+   return result;
+ }
+
+ /** \brief Set TTBR0 EL3
+     \param [in] ttbr0 Translation Table Base Register 0 value to set
+  */
+ __STATIC_FORCEINLINE void __set_TTBR0_EL3(uint64_t ttbr0)
+ {
+   __ASM volatile("MSR ttbr0_el3, %0" : : "r" (ttbr0) : "memory");
+ }
+
+ /** \brief Get SCTLR EL3
+     \return SCTLR EL3 value
+  */
+ __STATIC_FORCEINLINE uint64_t __get_SCTLR_EL3(void)
+ {
+   uint64_t result;
+   __ASM volatile("MRS %0, sctlr_el3" : "=r" (result) : : "memory");
+   return result;
+ }
+
+ /** \brief Set SCTLR EL3
+     \param [in] sctlr SCTLR value to set
+  */
+ __STATIC_FORCEINLINE void __set_SCTLR_EL3(uint64_t sctlr)
+ {
+   __ASM volatile("MSR sctlr_el3, %0" : : "r" (sctlr) : "memory");
+ }
+
+ /** \brief Set VBAR EL3
+     \param [in] vbar VBAR value to set
+  */
+ __STATIC_FORCEINLINE void __set_VBAR_EL3(uint64_t vbar)
+ {
+   __ASM volatile("MSR vbar_el3, %0" : : "r" (vbar) : "memory");
+ }
+
+ /** \brief Set VBAR EL2
+     \param [in] vbar VBAR value to set
+  */
+ __STATIC_FORCEINLINE void __set_VBAR_EL2(uint64_t vbar)
+ {
+   __ASM volatile("MSR vbar_el2, %0" : : "r" (vbar) : "memory");
+ }
+
+ /** \brief Set VBAR EL1
+     \param [in] vbar VBAR value to set
+  */
+ __STATIC_FORCEINLINE void __set_VBAR_EL1(uint64_t vbar)
+ {
+   __ASM volatile("MSR vbar_el1, %0" : : "r" (vbar) : "memory");
+ }
+
+
+#endif /* __CMSIS_SYSTEM_CONTROL_H */
\ No newline at end of file
diff --git a/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h b/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h
index 31ed87678..db0c96b2c 100644
--- a/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h
+++ b/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h
@@ -1,11 +1,11 @@
 /**************************************************************************//**
  * @file cmsis_armclang_a.h
  * @brief CMSIS compiler armclang (Arm Compiler 6) header file
- * @version V1.2.2
- * @date 13. November 2022
+ * @version V6.0.0
+ * @date 4.
August 2023 ******************************************************************************/ /* - * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -22,435 +22,23 @@ * limitations under the License. */ +/*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */ + #ifndef __CMSIS_ARMCLANG_A_H #define __CMSIS_ARMCLANG_A_H #pragma clang system_header /* treat file as system include file */ -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE __inline -#endif -#ifndef __FORCEINLINE - #define __FORCEINLINE __attribute__((always_inline)) -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static __inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed)) -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#ifndef __CMSIS_ARMCLANG_H + #error "This file must not be included directly" #endif -/* ########################## Core Instruction Access ######################### */ -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. 
- */ -#define __NOP __builtin_arm_nop - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI __builtin_arm_wfi - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE __builtin_arm_wfe - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV __builtin_arm_sev - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -#define __ISB() __builtin_arm_isb(0xF) - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -#define __DSB() __builtin_arm_dsb(0xF) - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -#define __DMB() __builtin_arm_dmb(0xF) - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV(value) __builtin_bswap32(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV16(value) __ROR(__REV(value), 16) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REVSH(value) (int16_t)__builtin_bswap16(value) - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -#define __RBIT __builtin_arm_rbit - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. 
- \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) -{ - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); -} - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex - - -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex - - -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXB (uint32_t)__builtin_arm_strex - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXH (uint32_t)__builtin_arm_strex - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXW (uint32_t)__builtin_arm_strex - - -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -#define __CLREX __builtin_arm_clrex - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT __builtin_arm_ssat - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. 
- \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT __builtin_arm_usat - -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) - -#define __SADD8 __builtin_arm_sadd8 -#define __SADD16 __builtin_arm_sadd16 -#define __QADD8 __builtin_arm_qadd8 -#define __QSUB8 __builtin_arm_qsub8 -#define __QADD16 __builtin_arm_qadd16 -#define __SHADD16 __builtin_arm_shadd16 -#define __QSUB16 __builtin_arm_qsub16 -#define __SHSUB16 __builtin_arm_shsub16 -#define __QASX __builtin_arm_qasx -#define __SHASX __builtin_arm_shasx -#define __QSAX __builtin_arm_qsax -#define __SHSAX __builtin_arm_shsax -#define __SXTB16 __builtin_arm_sxtb16 -#define __SMUAD __builtin_arm_smuad -#define __SMUADX __builtin_arm_smuadx -#define __SMLAD __builtin_arm_smlad -#define __SMLADX __builtin_arm_smladx -#define __SMLALD __builtin_arm_smlald -#define __SMLALDX __builtin_arm_smlaldx -#define __SMUSD __builtin_arm_smusd -#define __SMUSDX __builtin_arm_smusdx -#define __SMLSDX __builtin_arm_smlsdx -#define __USAT16 __builtin_arm_usat16 -#define __SSUB8 __builtin_arm_ssub8 -#define __SXTB16 __builtin_arm_sxtb16 -#define __SXTAB16 __builtin_arm_sxtab16 - - -__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) -{ - int32_t result; - - __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) -{ - int32_t result; - - __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ - ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) - -#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ - ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -#endif /* (__ARM_FEATURE_DSP == 1) */ /* ########################### Core Function Access ########################### */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} - -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. 
- \return Floating Point Status/Control register value - */ -#define __get_FPSCR __builtin_arm_get_fpscr - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ */ -#define __set_FPSCR __builtin_arm_set_fpscr /** \brief Get CPSR Register \return CPSR Register value @@ -560,6 +148,9 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) #endif } +/** @} end of CMSIS_Core_RegAccFunctions */ + + /* * Include common core functions to access Coprocessor 15 registers */ @@ -569,76 +160,4 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) #define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) #define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) -#include "cmsis_cp15.h" - -/** \brief Enable Floating Point Unit - - Critical section, called from undef handler, so systick is disabled - */ -__STATIC_INLINE void __FPU_Enable(void) -{ - __ASM volatile( - //Permit access to VFP/NEON, registers by modifying CPACR - " MRC p15,0,R1,c1,c0,2 \n" - " ORR R1,R1,#0x00F00000 \n" - " MCR p15,0,R1,c1,c0,2 \n" - - //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted - " ISB \n" - - //Enable VFP/NEON - " VMRS R1,FPEXC \n" - " ORR R1,R1,#0x40000000 \n" - " VMSR FPEXC,R1 \n" - - //Initialise VFP/NEON registers to 0 - " MOV R2,#0 \n" - - //Initialise D16 registers to 0 - " VMOV D0, R2,R2 \n" - " VMOV D1, R2,R2 \n" - " VMOV D2, R2,R2 \n" - " VMOV D3, R2,R2 \n" - " VMOV D4, R2,R2 \n" - " VMOV D5, R2,R2 \n" - " VMOV D6, R2,R2 \n" - " VMOV D7, R2,R2 \n" - " VMOV D8, R2,R2 \n" - " VMOV D9, R2,R2 \n" - " VMOV D10,R2,R2 \n" - " VMOV D11,R2,R2 \n" - " VMOV D12,R2,R2 \n" - " VMOV D13,R2,R2 \n" - " VMOV D14,R2,R2 \n" - " VMOV D15,R2,R2 \n" - -#if (defined(__ARM_NEON) && (__ARM_NEON == 1)) - //Initialise D32 registers to 0 - " VMOV D16,R2,R2 \n" - " VMOV D17,R2,R2 \n" - " VMOV D18,R2,R2 \n" - " VMOV D19,R2,R2 \n" - " VMOV D20,R2,R2 \n" - " VMOV D21,R2,R2 \n" - " VMOV D22,R2,R2 \n" - " VMOV D23,R2,R2 \n" - " VMOV D24,R2,R2 \n" - " VMOV D25,R2,R2 \n" - " VMOV D26,R2,R2 \n" - " VMOV D27,R2,R2 \n" - " VMOV D28,R2,R2 \n" - " VMOV D29,R2,R2 \n" - " VMOV D30,R2,R2 \n" - " VMOV D31,R2,R2 \n" -#endif - - //Initialise FPSCR to a known state - " VMRS R1,FPSCR \n" - " LDR R2,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero. - " AND R1,R1,R2 \n" - " VMSR FPSCR,R1 " - : : : "cc", "r1", "r2" - ); -} - #endif /* __CMSIS_ARMCLANG_A_H */ diff --git a/CMSIS/Core/Include/a-profile/cmsis_clang_a.h b/CMSIS/Core/Include/a-profile/cmsis_clang_a.h new file mode 100644 index 000000000..2fdf0a73e --- /dev/null +++ b/CMSIS/Core/Include/a-profile/cmsis_clang_a.h @@ -0,0 +1,168 @@ +/**************************************************************************//** + * @file cmsis_clang_a.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V5.5.0 + * @date 04. December 2022 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_CLANG_COREA_H +#define __CMSIS_CLANG_COREA_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __CMSIS_CLANG_H + #error "This file must not be included directly" +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP(void) +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) +{ + uint32_t cpsr; + uint32_t result; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV %1, sp \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr), "=r"(result) : : "memory" + ); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV sp, %1 \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory" + ); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/** @} end of 
CMSIS_Core_RegAccFunctions */ + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + +#if __CORTEX_A == 5 ||__CORTEX_A == 7 || __CORTEX_A == 9 + #include "armv7a_cp15.h" +#elif __CORTEX_A == 35 || __CORTEX_A == 53 || __CORTEX_A == 57 + #include "armv8a_system_control.h" +#else + #warning "Unknown or unsupported core" +#endif + +#endif /* __CMSIS_CLANG_COREA_H */ diff --git a/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h b/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h index 544035091..91243c4ab 100644 --- a/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h +++ b/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_gcc_a.h * @brief CMSIS compiler GCC header file - * @version V1.3.3 - * @date 13. November 2022 + * @version V6.0.0 + * @date 4. August 2023 ******************************************************************************/ /* - * Copyright (c) 2009-2022 Arm Limited. All rights reserved. + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -25,753 +25,21 @@ #ifndef __CMSIS_GCC_A_H #define __CMSIS_GCC_A_H +#ifndef __CMSIS_GCC_H + #error "This file must not be included directly" +#endif + /* ignore some GCC warnings */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wsign-conversion" #pragma GCC diagnostic ignored "-Wconversion" #pragma GCC diagnostic ignored "-Wunused-parameter" -/* Fallback for __has_builtin */ -#ifndef __has_builtin - #define __has_builtin(x) (0) -#endif - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE inline -#endif -#ifndef __FORCEINLINE - #define __FORCEINLINE __attribute__((always_inline)) -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT 
T_UINT16_READ { uint16_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif -#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif - - -/* ########################## Core Instruction Access ######################### */ -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __ASM volatile ("nop") - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __ASM volatile ("wfi":::"memory") - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __ASM volatile ("wfe":::"memory") - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __ASM volatile ("sev") - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -__STATIC_FORCEINLINE void __ISB(void) -{ - __ASM volatile ("isb 0xF":::"memory"); -} - - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -__STATIC_FORCEINLINE void __DSB(void) -{ - __ASM volatile ("dsb 0xF":::"memory"); -} - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -__STATIC_FORCEINLINE void __DMB(void) -{ - __ASM volatile ("dmb 0xF":::"memory"); -} - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) -{ -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) - return __builtin_bswap32(value); -#else - uint32_t result; - - __ASM ("rev %0, %1" : "=r" (result) : "r" (value) ); - return result; -#endif -} - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. 
- \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) -{ - uint32_t result; - __ASM ("rev16 %0, %1" : "=r" (result) : "r" (value)); - return result; -} - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) -{ -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - return (int16_t)__builtin_bswap16(value); -#else - int16_t result; - - __ASM ("revsh %0, %1" : "=r" (result) : "r" (value) ); - return result; -#endif -} - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) -{ - uint32_t result; - __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); - return result; -} - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) -{ - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); -} - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) -{ - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) -{ - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return(result); -} - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); -} - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); -} - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return(result); -} - - -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -__STATIC_FORCEINLINE void __CLREX(void) -{ - __ASM volatile ("clrex" ::: "memory"); -} - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(ARG1, ARG2) \ -__extension__ \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. 
- \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(ARG1, ARG2) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} - -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ - #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) - #if __has_builtin(__builtin_arm_get_fpscr) - // Re-enable using built-in when GCC has been fixed - // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ - return __builtin_arm_get_fpscr(); - #else - uint32_t result; - - __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); - return(result); - #endif - #else - return(0U); - #endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. 
- \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ - #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) - #if __has_builtin(__builtin_arm_set_fpscr) - // Re-enable using built-in when GCC has been fixed - // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ - __builtin_arm_set_fpscr(fpscr); - #else - __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); - #endif - #else - (void)fpscr; - #endif -} - - -/*@} end of CMSIS_Core_RegAccFunctions */ - - -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - - -__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - - -__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) -{ - uint32_t result; - - __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - - __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) -{ - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlald %0, %1, %2, %3" : 
"=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); -#endif - - return(llr.w64); -} - -__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) -{ - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); -#endif - - return(llr.w64); -} - -__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - - __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) -{ - int32_t result; - - __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) -{ - int32_t result; - - __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - - -__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - - - -#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ - ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) - -#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ - ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - - __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -/*@} end of group CMSIS_SIMD_intrinsics */ - - /** \defgroup CMSIS_Core_intrinsics CMSIS Core Intrinsics Access to dedicated SIMD instructions @{ */ - /** \brief Get CPSR Register \return CPSR Register value */ @@ -887,71 +155,6 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) #define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) #define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) -#include "cmsis_cp15.h" - -/** \brief Enable Floating Point Unit - - Critical section, called from undef handler, so systick is disabled - */ -__STATIC_INLINE void __FPU_Enable(void) -{ - // Permit access to VFP/NEON, registers by modifying CPACR - const uint32_t cpacr = 
__get_CPACR(); - __set_CPACR(cpacr | 0x00F00000ul); - __ISB(); - - // Enable VFP/NEON - const uint32_t fpexc = __get_FPEXC(); - __set_FPEXC(fpexc | 0x40000000ul); - - __ASM volatile( - // Initialise VFP/NEON registers to 0 - " MOV R2,#0 \n" - - // Initialise D16 registers to 0 - " VMOV D0, R2,R2 \n" - " VMOV D1, R2,R2 \n" - " VMOV D2, R2,R2 \n" - " VMOV D3, R2,R2 \n" - " VMOV D4, R2,R2 \n" - " VMOV D5, R2,R2 \n" - " VMOV D6, R2,R2 \n" - " VMOV D7, R2,R2 \n" - " VMOV D8, R2,R2 \n" - " VMOV D9, R2,R2 \n" - " VMOV D10,R2,R2 \n" - " VMOV D11,R2,R2 \n" - " VMOV D12,R2,R2 \n" - " VMOV D13,R2,R2 \n" - " VMOV D14,R2,R2 \n" - " VMOV D15,R2,R2 \n" - -#if (defined(__ARM_NEON) && (__ARM_NEON == 1)) - // Initialise D32 registers to 0 - " VMOV D16,R2,R2 \n" - " VMOV D17,R2,R2 \n" - " VMOV D18,R2,R2 \n" - " VMOV D19,R2,R2 \n" - " VMOV D20,R2,R2 \n" - " VMOV D21,R2,R2 \n" - " VMOV D22,R2,R2 \n" - " VMOV D23,R2,R2 \n" - " VMOV D24,R2,R2 \n" - " VMOV D25,R2,R2 \n" - " VMOV D26,R2,R2 \n" - " VMOV D27,R2,R2 \n" - " VMOV D28,R2,R2 \n" - " VMOV D29,R2,R2 \n" - " VMOV D30,R2,R2 \n" - " VMOV D31,R2,R2 \n" -#endif - : : : "cc", "r2" - ); - - // Initialise FPSCR to a known state - const uint32_t fpscr = __get_FPSCR(); - __set_FPSCR(fpscr & 0x00086060ul); -} /*@} end of group CMSIS_Core_intrinsics */ diff --git a/CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h b/CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h index e8bed5281..098a443fe 100644 --- a/CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h +++ b/CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h @@ -1,14 +1,14 @@ /**************************************************************************//** * @file cmsis_iccarm_a.h * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file - * @version V5.0.8 - * @date 13. November 2022 + * @version V6.0.0 + * @date 4. August 2023 ******************************************************************************/ //------------------------------------------------------------------------------ // // Copyright (c) 2017-2018 IAR Systems -// Copyright (c) 2018-2019 Arm Limited +// Copyright (c) 2018-2023 Arm Limited. All rights reserved. // // SPDX-License-Identifier: Apache-2.0 // @@ -29,221 +29,24 @@ #ifndef __CMSIS_ICCARM_A_H__ #define __CMSIS_ICCARM_A_H__ -#ifndef __ICCARM__ - #error This file should only be compiled by ICCARM -#endif - -#pragma system_include - -#define __IAR_FT _Pragma("inline=forced") __intrinsic - -#if (__VER__ >= 8000000) - #define __ICCARM_V8 1 -#else - #define __ICCARM_V8 0 -#endif - -#pragma language=extended - -#ifndef __ALIGNED - #if __ICCARM_V8 - #define __ALIGNED(x) __attribute__((aligned(x))) - #elif (__VER__ >= 7080000) - /* Needs IAR language extensions */ - #define __ALIGNED(x) __attribute__((aligned(x))) - #else - #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. - #define __ALIGNED(x) - #endif -#endif - - -/* Define compiler macros for CPU architecture, used in CMSIS 5. 
- */ -#if __ARM_ARCH_7A__ -/* Macro already defined */ -#else - #if defined(__ARM7A__) - #define __ARM_ARCH_7A__ 1 - #endif -#endif - -#ifndef __ASM - #define __ASM __asm -#endif - -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif - -#ifndef __INLINE - #define __INLINE inline -#endif - -#ifndef __NO_RETURN - #if __ICCARM_V8 - #define __NO_RETURN __attribute__((__noreturn__)) - #else - #define __NO_RETURN _Pragma("object_attribute=__noreturn") - #endif -#endif - -#ifndef __PACKED - #if __ICCARM_V8 - #define __PACKED __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED __packed - #endif -#endif - -#ifndef __PACKED_STRUCT - #if __ICCARM_V8 - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED_STRUCT __packed struct - #endif -#endif - -#ifndef __PACKED_UNION - #if __ICCARM_V8 - #define __PACKED_UNION union __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED_UNION __packed union - #endif -#endif - -#ifndef __RESTRICT - #if __ICCARM_V8 - #define __RESTRICT __restrict - #else - /* Needs IAR language extensions */ - #define __RESTRICT restrict - #endif -#endif - -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif - -#ifndef __FORCEINLINE - #define __FORCEINLINE _Pragma("inline=forced") -#endif - -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE -#endif - -#ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) -#endif - -#ifndef __UNALIGNED_UINT16_READ - #pragma language=save - #pragma language=extended - __IAR_FT uint16_t __iar_uint16_read(void const *ptr) - { - return *(__packed uint16_t*)(ptr); - } - #pragma language=restore - #define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) -#endif - - -#ifndef __UNALIGNED_UINT16_WRITE - #pragma language=save - #pragma language=extended - __IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) - { - *(__packed uint16_t*)(ptr) = val;; - } - #pragma language=restore - #define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) -#endif - -#ifndef __UNALIGNED_UINT32_READ - #pragma language=save - #pragma language=extended - __IAR_FT uint32_t __iar_uint32_read(void const *ptr) - { - return *(__packed uint32_t*)(ptr); - } - #pragma language=restore - #define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) +#ifndef __CMSIS_ICCARM_H__ + #error "This file must not be included directly" #endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma language=save - #pragma language=extended - __IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) - { - *(__packed uint32_t*)(ptr) = val;; - } - #pragma language=restore - #define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) -#endif - -#if 0 -#ifndef __UNALIGNED_UINT32 /* deprecated */ - #pragma language=save - #pragma language=extended - __packed struct __iar_u32 { uint32_t v; }; - #pragma language=restore - #define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) -#endif -#endif - -#ifndef __USED - #if __ICCARM_V8 - #define __USED __attribute__((used)) - #else - #define __USED _Pragma("__root") - #endif -#endif - -#ifndef __WEAK - #if __ICCARM_V8 - #define __WEAK __attribute__((weak)) - #else - #define __WEAK _Pragma("__weak") - #endif -#endif - - -#ifndef __ICCARM_INTRINSICS_VERSION__ - #define __ICCARM_INTRINSICS_VERSION__ 0 +#ifndef __ICCARM__ + #error This file 
should only be compiled by ICCARM #endif #if __ICCARM_INTRINSICS_VERSION__ == 2 - - #if defined(__CLZ) - #undef __CLZ - #endif - #if defined(__REVSH) - #undef __REVSH - #endif - #if defined(__RBIT) - #undef __RBIT - #endif - #if defined(__SSAT) - #undef __SSAT - #endif - #if defined(__USAT) - #undef __USAT - #endif - - #include "iccarm_builtin.h" - - #define __disable_fault_irq __iar_builtin_disable_fiq + #define __disable_fault_irq __iar_builtin_disable_fiq #define __disable_irq __iar_builtin_disable_interrupt - #define __enable_fault_irq __iar_builtin_enable_fiq + #define __enable_fault_irq __iar_builtin_enable_fiq #define __enable_irq __iar_builtin_enable_interrupt #define __arm_rsr __iar_builtin_rsr #define __arm_wsr __iar_builtin_wsr - #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U))) + #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) #define __get_FPSCR() (__arm_rsr("FPSCR")) #define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", (VALUE))) #else @@ -273,110 +76,6 @@ #define __set_CP64(cp, op1, Rt, CRm) \ __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) - #include "cmsis_cp15.h" - - #define __NOP __iar_builtin_no_operation - - #define __CLZ __iar_builtin_CLZ - #define __CLREX __iar_builtin_CLREX - - #define __DMB __iar_builtin_DMB - #define __DSB __iar_builtin_DSB - #define __ISB __iar_builtin_ISB - - #define __LDREXB __iar_builtin_LDREXB - #define __LDREXH __iar_builtin_LDREXH - #define __LDREXW __iar_builtin_LDREX - - #define __RBIT __iar_builtin_RBIT - #define __REV __iar_builtin_REV - #define __REV16 __iar_builtin_REV16 - - __IAR_FT int16_t __REVSH(int16_t val) - { - return (int16_t) __iar_builtin_REVSH(val); - } - - #define __ROR __iar_builtin_ROR - #define __RRX __iar_builtin_RRX - - #define __SEV __iar_builtin_SEV - - #define __SSAT __iar_builtin_SSAT - - #define __STREXB __iar_builtin_STREXB - #define __STREXH __iar_builtin_STREXH - #define __STREXW __iar_builtin_STREX - - #define __USAT __iar_builtin_USAT - - #define __WFE __iar_builtin_WFE - #define __WFI __iar_builtin_WFI - - #define __SADD8 __iar_builtin_SADD8 - #define __QADD8 __iar_builtin_QADD8 - #define __SHADD8 __iar_builtin_SHADD8 - #define __UADD8 __iar_builtin_UADD8 - #define __UQADD8 __iar_builtin_UQADD8 - #define __UHADD8 __iar_builtin_UHADD8 - #define __SSUB8 __iar_builtin_SSUB8 - #define __QSUB8 __iar_builtin_QSUB8 - #define __SHSUB8 __iar_builtin_SHSUB8 - #define __USUB8 __iar_builtin_USUB8 - #define __UQSUB8 __iar_builtin_UQSUB8 - #define __UHSUB8 __iar_builtin_UHSUB8 - #define __SADD16 __iar_builtin_SADD16 - #define __QADD16 __iar_builtin_QADD16 - #define __SHADD16 __iar_builtin_SHADD16 - #define __UADD16 __iar_builtin_UADD16 - #define __UQADD16 __iar_builtin_UQADD16 - #define __UHADD16 __iar_builtin_UHADD16 - #define __SSUB16 __iar_builtin_SSUB16 - #define __QSUB16 __iar_builtin_QSUB16 - #define __SHSUB16 __iar_builtin_SHSUB16 - #define __USUB16 __iar_builtin_USUB16 - #define __UQSUB16 __iar_builtin_UQSUB16 - #define __UHSUB16 __iar_builtin_UHSUB16 - #define __SASX __iar_builtin_SASX - #define __QASX __iar_builtin_QASX - #define __SHASX __iar_builtin_SHASX - #define __UASX __iar_builtin_UASX - #define __UQASX __iar_builtin_UQASX - #define __UHASX __iar_builtin_UHASX - #define __SSAX __iar_builtin_SSAX - #define __QSAX __iar_builtin_QSAX - #define __SHSAX __iar_builtin_SHSAX - #define __USAX __iar_builtin_USAX - #define __UQSAX __iar_builtin_UQSAX - #define __UHSAX __iar_builtin_UHSAX - #define __USAD8 
__iar_builtin_USAD8 - #define __USADA8 __iar_builtin_USADA8 - #define __SSAT16 __iar_builtin_SSAT16 - #define __USAT16 __iar_builtin_USAT16 - #define __UXTB16 __iar_builtin_UXTB16 - #define __UXTAB16 __iar_builtin_UXTAB16 - #define __SXTB16 __iar_builtin_SXTB16 - #define __SXTAB16 __iar_builtin_SXTAB16 - #define __SMUAD __iar_builtin_SMUAD - #define __SMUADX __iar_builtin_SMUADX - #define __SMMLA __iar_builtin_SMMLA - #define __SMLAD __iar_builtin_SMLAD - #define __SMLADX __iar_builtin_SMLADX - #define __SMLALD __iar_builtin_SMLALD - #define __SMLALDX __iar_builtin_SMLALDX - #define __SMUSD __iar_builtin_SMUSD - #define __SMUSDX __iar_builtin_SMUSDX - #define __SMLSD __iar_builtin_SMLSD - #define __SMLSDX __iar_builtin_SMLSDX - #define __SMLSLD __iar_builtin_SMLSLD - #define __SMLSLDX __iar_builtin_SMLSLDX - #define __SEL __iar_builtin_SEL - #define __QADD __iar_builtin_QADD - #define __QSUB __iar_builtin_QSUB - #define __PKHBT __iar_builtin_PKHBT - #define __PKHTB __iar_builtin_PKHTB - -#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ #if !((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U))) #define __get_FPSCR __cmsis_iar_get_FPSR_not_active @@ -460,12 +159,8 @@ #define __set_CP64(cp, op1, Rt, CRm) \ __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) - #include "cmsis_cp15.h" - #endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */ -#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value)) - __IAR_FT uint32_t __get_SP_usr(void) { @@ -495,79 +190,10 @@ __IAR_FT void __set_SP_usr(uint32_t topOfProcStack) #define __get_mode() (__get_CPSR() & 0x1FU) -__STATIC_INLINE -void __FPU_Enable(void) -{ - __ASM volatile( - //Permit access to VFP/NEON, registers by modifying CPACR - " MRC p15,0,R1,c1,c0,2 \n" - " ORR R1,R1,#0x00F00000 \n" - " MCR p15,0,R1,c1,c0,2 \n" - - //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted - " ISB \n" - - //Enable VFP/NEON - " VMRS R1,FPEXC \n" - " ORR R1,R1,#0x40000000 \n" - " VMSR FPEXC,R1 \n" - - //Initialise VFP/NEON registers to 0 - " MOV R2,#0 \n" - - //Initialise D16 registers to 0 - " VMOV D0, R2,R2 \n" - " VMOV D1, R2,R2 \n" - " VMOV D2, R2,R2 \n" - " VMOV D3, R2,R2 \n" - " VMOV D4, R2,R2 \n" - " VMOV D5, R2,R2 \n" - " VMOV D6, R2,R2 \n" - " VMOV D7, R2,R2 \n" - " VMOV D8, R2,R2 \n" - " VMOV D9, R2,R2 \n" - " VMOV D10,R2,R2 \n" - " VMOV D11,R2,R2 \n" - " VMOV D12,R2,R2 \n" - " VMOV D13,R2,R2 \n" - " VMOV D14,R2,R2 \n" - " VMOV D15,R2,R2 \n" - -#ifdef __ARM_ADVANCED_SIMD__ - //Initialise D32 registers to 0 - " VMOV D16,R2,R2 \n" - " VMOV D17,R2,R2 \n" - " VMOV D18,R2,R2 \n" - " VMOV D19,R2,R2 \n" - " VMOV D20,R2,R2 \n" - " VMOV D21,R2,R2 \n" - " VMOV D22,R2,R2 \n" - " VMOV D23,R2,R2 \n" - " VMOV D24,R2,R2 \n" - " VMOV D25,R2,R2 \n" - " VMOV D26,R2,R2 \n" - " VMOV D27,R2,R2 \n" - " VMOV D28,R2,R2 \n" - " VMOV D29,R2,R2 \n" - " VMOV D30,R2,R2 \n" - " VMOV D31,R2,R2 \n" -#endif - - //Initialise FPSCR to a known state - " VMRS R1,FPSCR \n" - " MOV32 R2,#0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero. 
- " AND R1,R1,R2 \n" - " VMSR FPSCR,R1 \n" - : : : "cc", "r1", "r2" - ); -} - - - #undef __IAR_FT #undef __ICCARM_V8 #pragma diag_default=Pe940 #pragma diag_default=Pe177 -#endif /* __CMSIS_ICCARM_A_H__ */ +#endif /* __CMSIS_ICCARM_COREA_H__ */ diff --git a/CMSIS/Core/Include/a-profile/gicv2.h b/CMSIS/Core/Include/a-profile/gicv2.h new file mode 100644 index 000000000..ab4d427b2 --- /dev/null +++ b/CMSIS/Core/Include/a-profile/gicv2.h @@ -0,0 +1,757 @@ +/****************************************************************************** + * @file gic_v20.h + * @brief CMSIS GIC 2.0 API for Armv7-A MPU and Armv7-R MCU + * @version V6.0.0 + * @date 8. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2017-2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_GIC_V20_H +#define ARM_GIC_V20_H + +#include + +/** \brief Structure type to access the Generic Interrupt Controller Distributor (GICD) +*/ +typedef struct +{ + __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) Distributor Control Register */ + __IM uint32_t TYPER; /*!< \brief Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IM uint32_t IIDR; /*!< \brief Offset: 0x008 (R/ ) Distributor Implementer Identification Register */ + RESERVED(0, uint32_t) + __IOM uint32_t STATUSR; /*!< \brief Offset: 0x010 (R/W) Error Reporting Status Register, optional */ + RESERVED(1[11], uint32_t) + __OM uint32_t SETSPI_NSR; /*!< \brief Offset: 0x040 ( /W) Set SPI Register */ + RESERVED(2, uint32_t) + __OM uint32_t CLRSPI_NSR; /*!< \brief Offset: 0x048 ( /W) Clear SPI Register */ + RESERVED(3, uint32_t) + __OM uint32_t SETSPI_SR; /*!< \brief Offset: 0x050 ( /W) Set SPI, Secure Register */ + RESERVED(4, uint32_t) + __OM uint32_t CLRSPI_SR; /*!< \brief Offset: 0x058 ( /W) Clear SPI, Secure Register */ + RESERVED(5[9], uint32_t) + __IOM uint32_t IGROUPR[32]; /*!< \brief Offset: 0x080 (R/W) Interrupt Group Registers */ + __IOM uint32_t ISENABLER[32]; /*!< \brief Offset: 0x100 (R/W) Interrupt Set-Enable Registers */ + __IOM uint32_t ICENABLER[32]; /*!< \brief Offset: 0x180 (R/W) Interrupt Clear-Enable Registers */ + __IOM uint32_t ISPENDR[32]; /*!< \brief Offset: 0x200 (R/W) Interrupt Set-Pending Registers */ + __IOM uint32_t ICPENDR[32]; /*!< \brief Offset: 0x280 (R/W) Interrupt Clear-Pending Registers */ + __IOM uint32_t ISACTIVER[32]; /*!< \brief Offset: 0x300 (R/W) Interrupt Set-Active Registers */ + __IOM uint32_t ICACTIVER[32]; /*!< \brief Offset: 0x380 (R/W) Interrupt Clear-Active Registers */ + __IOM uint32_t IPRIORITYR[255]; /*!< \brief Offset: 0x400 (R/W) Interrupt Priority Registers */ + RESERVED(6, uint32_t) + __IOM uint32_t ITARGETSR[255]; /*!< \brief Offset: 0x800 (R/W) 
Interrupt Targets Registers */
+ RESERVED(7, uint32_t)
+ __IOM uint32_t ICFGR[64]; /*!< \brief Offset: 0xC00 (R/W) Interrupt Configuration Registers */
+ __IOM uint32_t IGRPMODR[32]; /*!< \brief Offset: 0xD00 (R/W) Interrupt Group Modifier Registers */
+ RESERVED(8[32], uint32_t)
+ __IOM uint32_t NSACR[64]; /*!< \brief Offset: 0xE00 (R/W) Non-secure Access Control Registers */
+ __OM uint32_t SGIR; /*!< \brief Offset: 0xF00 ( /W) Software Generated Interrupt Register */
+ RESERVED(9[3], uint32_t)
+ __IOM uint32_t CPENDSGIR[4]; /*!< \brief Offset: 0xF10 (R/W) SGI Clear-Pending Registers */
+ __IOM uint32_t SPENDSGIR[4]; /*!< \brief Offset: 0xF20 (R/W) SGI Set-Pending Registers */
+ RESERVED(10[5236], uint32_t)
+ __IOM uint64_t IROUTER[988]; /*!< \brief Offset: 0x6100(R/W) Interrupt Routing Registers */
+} GICDistributor_Type;
+
+#define GICDistributor ((GICDistributor_Type *) GIC_DISTRIBUTOR_BASE ) /*!< \brief GIC Distributor register set access pointer */
+
+/* GICDistributor CTLR Register */
+#define GICDistributor_CTLR_EnableGrp0_Pos 0U /*!< GICDistributor CTLR: EnableGrp0 Position */
+#define GICDistributor_CTLR_EnableGrp0_Msk (0x1U /*<< GICDistributor_CTLR_EnableGrp0_Pos*/) /*!< GICDistributor CTLR: EnableGrp0 Mask */
+#define GICDistributor_CTLR_EnableGrp0(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CTLR_EnableGrp0_Pos*/)) & GICDistributor_CTLR_EnableGrp0_Msk)
+
+#define GICDistributor_CTLR_EnableGrp1_Pos 1U /*!< GICDistributor CTLR: EnableGrp1 Position */
+#define GICDistributor_CTLR_EnableGrp1_Msk (0x1U << GICDistributor_CTLR_EnableGrp1_Pos) /*!< GICDistributor CTLR: EnableGrp1 Mask */
+#define GICDistributor_CTLR_EnableGrp1(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_EnableGrp1_Pos)) & GICDistributor_CTLR_EnableGrp1_Msk)
+
+#define GICDistributor_CTLR_ARE_Pos 4U /*!< GICDistributor CTLR: ARE Position */
+#define GICDistributor_CTLR_ARE_Msk (0x1U << GICDistributor_CTLR_ARE_Pos) /*!< GICDistributor CTLR: ARE Mask */
+#define GICDistributor_CTLR_ARE(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_ARE_Pos)) & GICDistributor_CTLR_ARE_Msk)
+
+#define GICDistributor_CTLR_DC_Pos 6U /*!< GICDistributor CTLR: DC Position */
+#define GICDistributor_CTLR_DC_Msk (0x1U << GICDistributor_CTLR_DC_Pos) /*!< GICDistributor CTLR: DC Mask */
+#define GICDistributor_CTLR_DC(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_DC_Pos)) & GICDistributor_CTLR_DC_Msk)
+
+#define GICDistributor_CTLR_EINWF_Pos 7U /*!< GICDistributor CTLR: EINWF Position */
+#define GICDistributor_CTLR_EINWF_Msk (0x1U << GICDistributor_CTLR_EINWF_Pos) /*!< GICDistributor CTLR: EINWF Mask */
+#define GICDistributor_CTLR_EINWF(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_EINWF_Pos)) & GICDistributor_CTLR_EINWF_Msk)
+
+#define GICDistributor_CTLR_RWP_Pos 31U /*!< GICDistributor CTLR: RWP Position */
+#define GICDistributor_CTLR_RWP_Msk (0x1U << GICDistributor_CTLR_RWP_Pos) /*!< GICDistributor CTLR: RWP Mask */
+#define GICDistributor_CTLR_RWP(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_RWP_Pos)) & GICDistributor_CTLR_RWP_Msk)
+
+/* GICDistributor TYPER Register */
+#define GICDistributor_TYPER_ITLinesNumber_Pos 0U /*!< GICDistributor TYPER: ITLinesNumber Position */
+#define GICDistributor_TYPER_ITLinesNumber_Msk (0x1FU /*<< GICDistributor_TYPER_ITLinesNumber_Pos*/) /*!< GICDistributor TYPER: ITLinesNumber Mask */
+#define GICDistributor_TYPER_ITLinesNumber(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_TYPER_ITLinesNumber_Pos*/)) & GICDistributor_TYPER_ITLinesNumber_Msk)
+
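+/* Illustrative usage sketch (not part of this header, helper name is hypothetical):
+   GICv2 encodes the number of implemented interrupt lines in TYPER.ITLinesNumber
+   as 32*(N+1). A helper built on the macros above could recover that count as:
+
+     static inline uint32_t GIC_NumInterrupts(void)
+     {
+       uint32_t n = GICDistributor_TYPER_ITLinesNumber(GICDistributor->TYPER);  // Pos is 0, masking TYPER yields N
+       return 32U * (n + 1U);                                                   // e.g. N = 4 -> 160 interrupt lines
+     }
+*/
+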
+#define GICDistributor_TYPER_CPUNumber_Pos 5U /*!< GICDistributor TYPER: CPUNumber Position */ +#define GICDistributor_TYPER_CPUNumber_Msk (0x7U << GICDistributor_TYPER_CPUNumber_Pos) /*!< GICDistributor TYPER: CPUNumber Mask */ +#define GICDistributor_TYPER_CPUNumber(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_CPUNumber_Pos)) & GICDistributor_TYPER_CPUNumber_Msk) + +#define GICDistributor_TYPER_SecurityExtn_Pos 10U /*!< GICDistributor TYPER: SecurityExtn Position */ +#define GICDistributor_TYPER_SecurityExtn_Msk (0x1U << GICDistributor_TYPER_SecurityExtn_Pos) /*!< GICDistributor TYPER: SecurityExtn Mask */ +#define GICDistributor_TYPER_SecurityExtn(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_SecurityExtn_Pos)) & GICDistributor_TYPER_SecurityExtn_Msk) + +#define GICDistributor_TYPER_LSPI_Pos 11U /*!< GICDistributor TYPER: LSPI Position */ +#define GICDistributor_TYPER_LSPI_Msk (0x1FU << GICDistributor_TYPER_LSPI_Pos) /*!< GICDistributor TYPER: LSPI Mask */ +#define GICDistributor_TYPER_LSPI(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_LSPI_Pos)) & GICDistributor_TYPER_LSPI_Msk) + +/* GICDistributor IIDR Register */ +#define GICDistributor_IIDR_Implementer_Pos 0U /*!< GICDistributor IIDR: Implementer Position */ +#define GICDistributor_IIDR_Implementer_Msk (0xFFFU /*<< GICDistributor_IIDR_Implementer_Pos*/) /*!< GICDistributor IIDR: Implementer Mask */ +#define GICDistributor_IIDR_Implementer(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_IIDR_Implementer_Pos*/)) & GICDistributor_IIDR_Implementer_Msk) + +#define GICDistributor_IIDR_Revision_Pos 12U /*!< GICDistributor IIDR: Revision Position */ +#define GICDistributor_IIDR_Revision_Msk (0xFU << GICDistributor_IIDR_Revision_Pos) /*!< GICDistributor IIDR: Revision Mask */ +#define GICDistributor_IIDR_Revision(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_Revision_Pos)) & GICDistributor_IIDR_Revision_Msk) + +#define GICDistributor_IIDR_Variant_Pos 16U /*!< GICDistributor IIDR: Variant Position */ +#define GICDistributor_IIDR_Variant_Msk (0xFU << GICDistributor_IIDR_Variant_Pos) /*!< GICDistributor IIDR: Variant Mask */ +#define GICDistributor_IIDR_Variant(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_Variant_Pos)) & GICDistributor_IIDR_Variant_Msk) + +#define GICDistributor_IIDR_ProductID_Pos 24U /*!< GICDistributor IIDR: ProductID Position */ +#define GICDistributor_IIDR_ProductID_Msk (0xFFU << GICDistributor_IIDR_ProductID_Pos) /*!< GICDistributor IIDR: ProductID Mask */ +#define GICDistributor_IIDR_ProductID(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_ProductID_Pos)) & GICDistributor_IIDR_ProductID_Msk) + +/* GICDistributor STATUSR Register */ +#define GICDistributor_STATUSR_RRD_Pos 0U /*!< GICDistributor STATUSR: RRD Position */ +#define GICDistributor_STATUSR_RRD_Msk (0x1U /*<< GICDistributor_STATUSR_RRD_Pos*/) /*!< GICDistributor STATUSR: RRD Mask */ +#define GICDistributor_STATUSR_RRD(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_STATUSR_RRD_Pos*/)) & GICDistributor_STATUSR_RRD_Msk) + +#define GICDistributor_STATUSR_WRD_Pos 1U /*!< GICDistributor STATUSR: WRD Position */ +#define GICDistributor_STATUSR_WRD_Msk (0x1U << GICDistributor_STATUSR_WRD_Pos) /*!< GICDistributor STATUSR: WRD Mask */ +#define GICDistributor_STATUSR_WRD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_WRD_Pos)) & GICDistributor_STATUSR_WRD_Msk) + +#define GICDistributor_STATUSR_RWOD_Pos 2U /*!< GICDistributor STATUSR: RWOD Position */ +#define GICDistributor_STATUSR_RWOD_Msk (0x1U 
<< GICDistributor_STATUSR_RWOD_Pos) /*!< GICDistributor STATUSR: RWOD Mask */ +#define GICDistributor_STATUSR_RWOD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_RWOD_Pos)) & GICDistributor_STATUSR_RWOD_Msk) + +#define GICDistributor_STATUSR_WROD_Pos 3U /*!< GICDistributor STATUSR: WROD Position */ +#define GICDistributor_STATUSR_WROD_Msk (0x1U << GICDistributor_STATUSR_WROD_Pos) /*!< GICDistributor STATUSR: WROD Mask */ +#define GICDistributor_STATUSR_WROD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_WROD_Pos)) & GICDistributor_STATUSR_WROD_Msk) + +/* GICDistributor SETSPI_NSR Register */ +#define GICDistributor_SETSPI_NSR_INTID_Pos 0U /*!< GICDistributor SETSPI_NSR: INTID Position */ +#define GICDistributor_SETSPI_NSR_INTID_Msk (0x3FFU /*<< GICDistributor_SETSPI_NSR_INTID_Pos*/) /*!< GICDistributor SETSPI_NSR: INTID Mask */ +#define GICDistributor_SETSPI_NSR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SETSPI_NSR_INTID_Pos*/)) & GICDistributor_SETSPI_NSR_INTID_Msk) + +/* GICDistributor CLRSPI_NSR Register */ +#define GICDistributor_CLRSPI_NSR_INTID_Pos 0U /*!< GICDistributor CLRSPI_NSR: INTID Position */ +#define GICDistributor_CLRSPI_NSR_INTID_Msk (0x3FFU /*<< GICDistributor_CLRSPI_NSR_INTID_Pos*/) /*!< GICDistributor CLRSPI_NSR: INTID Mask */ +#define GICDistributor_CLRSPI_NSR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CLRSPI_NSR_INTID_Pos*/)) & GICDistributor_CLRSPI_NSR_INTID_Msk) + +/* GICDistributor SETSPI_SR Register */ +#define GICDistributor_SETSPI_SR_INTID_Pos 0U /*!< GICDistributor SETSPI_SR: INTID Position */ +#define GICDistributor_SETSPI_SR_INTID_Msk (0x3FFU /*<< GICDistributor_SETSPI_SR_INTID_Pos*/) /*!< GICDistributor SETSPI_SR: INTID Mask */ +#define GICDistributor_SETSPI_SR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SETSPI_SR_INTID_Pos*/)) & GICDistributor_SETSPI_SR_INTID_Msk) + +/* GICDistributor CLRSPI_SR Register */ +#define GICDistributor_CLRSPI_SR_INTID_Pos 0U /*!< GICDistributor CLRSPI_SR: INTID Position */ +#define GICDistributor_CLRSPI_SR_INTID_Msk (0x3FFU /*<< GICDistributor_CLRSPI_SR_INTID_Pos*/) /*!< GICDistributor CLRSPI_SR: INTID Mask */ +#define GICDistributor_CLRSPI_SR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CLRSPI_SR_INTID_Pos*/)) & GICDistributor_CLRSPI_SR_INTID_Msk) + +/* GICDistributor ITARGETSR Register */ +#define GICDistributor_ITARGETSR_CPU0_Pos 0U /*!< GICDistributor ITARGETSR: CPU0 Position */ +#define GICDistributor_ITARGETSR_CPU0_Msk (0x1U /*<< GICDistributor_ITARGETSR_CPU0_Pos*/) /*!< GICDistributor ITARGETSR: CPU0 Mask */ +#define GICDistributor_ITARGETSR_CPU0(x) (((uint8_t)(((uint8_t)(x)) /*<< GICDistributor_ITARGETSR_CPU0_Pos*/)) & GICDistributor_ITARGETSR_CPU0_Msk) + +#define GICDistributor_ITARGETSR_CPU1_Pos 1U /*!< GICDistributor ITARGETSR: CPU1 Position */ +#define GICDistributor_ITARGETSR_CPU1_Msk (0x1U << GICDistributor_ITARGETSR_CPU1_Pos) /*!< GICDistributor ITARGETSR: CPU1 Mask */ +#define GICDistributor_ITARGETSR_CPU1(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU1_Pos)) & GICDistributor_ITARGETSR_CPU1_Msk) + +#define GICDistributor_ITARGETSR_CPU2_Pos 2U /*!< GICDistributor ITARGETSR: CPU2 Position */ +#define GICDistributor_ITARGETSR_CPU2_Msk (0x1U << GICDistributor_ITARGETSR_CPU2_Pos) /*!< GICDistributor ITARGETSR: CPU2 Mask */ +#define GICDistributor_ITARGETSR_CPU2(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU2_Pos)) & GICDistributor_ITARGETSR_CPU2_Msk) + +#define GICDistributor_ITARGETSR_CPU3_Pos 3U /*!< 
GICDistributor ITARGETSR: CPU3 Position */ +#define GICDistributor_ITARGETSR_CPU3_Msk (0x1U << GICDistributor_ITARGETSR_CPU3_Pos) /*!< GICDistributor ITARGETSR: CPU3 Mask */ +#define GICDistributor_ITARGETSR_CPU3(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU3_Pos)) & GICDistributor_ITARGETSR_CPU3_Msk) + +#define GICDistributor_ITARGETSR_CPU4_Pos 4U /*!< GICDistributor ITARGETSR: CPU4 Position */ +#define GICDistributor_ITARGETSR_CPU4_Msk (0x1U << GICDistributor_ITARGETSR_CPU4_Pos) /*!< GICDistributor ITARGETSR: CPU4 Mask */ +#define GICDistributor_ITARGETSR_CPU4(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU4_Pos)) & GICDistributor_ITARGETSR_CPU4_Msk) + +#define GICDistributor_ITARGETSR_CPU5_Pos 5U /*!< GICDistributor ITARGETSR: CPU5 Position */ +#define GICDistributor_ITARGETSR_CPU5_Msk (0x1U << GICDistributor_ITARGETSR_CPU5_Pos) /*!< GICDistributor ITARGETSR: CPU5 Mask */ +#define GICDistributor_ITARGETSR_CPU5(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU5_Pos)) & GICDistributor_ITARGETSR_CPU5_Msk) + +#define GICDistributor_ITARGETSR_CPU6_Pos 6U /*!< GICDistributor ITARGETSR: CPU6 Position */ +#define GICDistributor_ITARGETSR_CPU6_Msk (0x1U << GICDistributor_ITARGETSR_CPU6_Pos) /*!< GICDistributor ITARGETSR: CPU6 Mask */ +#define GICDistributor_ITARGETSR_CPU6(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU6_Pos)) & GICDistributor_ITARGETSR_CPU6_Msk) + +#define GICDistributor_ITARGETSR_CPU7_Pos 7U /*!< GICDistributor ITARGETSR: CPU7 Position */ +#define GICDistributor_ITARGETSR_CPU7_Msk (0x1U << GICDistributor_ITARGETSR_CPU7_Pos) /*!< GICDistributor ITARGETSR: CPU7 Mask */ +#define GICDistributor_ITARGETSR_CPU7(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU7_Pos)) & GICDistributor_ITARGETSR_CPU7_Msk) + +/* GICDistributor SGIR Register */ +#define GICDistributor_SGIR_INTID_Pos 0U /*!< GICDistributor SGIR: INTID Position */ +#define GICDistributor_SGIR_INTID_Msk (0xFU /*<< GICDistributor_SGIR_INTID_Pos*/) /*!< GICDistributor SGIR: INTID Mask */ +#define GICDistributor_SGIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SGIR_INTID_Pos*/)) & GICDistributor_SGIR_INTID_Msk) + +#define GICDistributor_SGIR_NSATT_Pos 15U /*!< GICDistributor SGIR: NSATT Position */ +#define GICDistributor_SGIR_NSATT_Msk (0x1U << GICDistributor_SGIR_NSATT_Pos) /*!< GICDistributor SGIR: NSATT Mask */ +#define GICDistributor_SGIR_NSATT(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_NSATT_Pos)) & GICDistributor_SGIR_NSATT_Msk) + +#define GICDistributor_SGIR_CPUTargetList_Pos 16U /*!< GICDistributor SGIR: CPUTargetList Position */ +#define GICDistributor_SGIR_CPUTargetList_Msk (0xFFU << GICDistributor_SGIR_CPUTargetList_Pos) /*!< GICDistributor SGIR: CPUTargetList Mask */ +#define GICDistributor_SGIR_CPUTargetList(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_CPUTargetList_Pos)) & GICDistributor_SGIR_CPUTargetList_Msk) + +#define GICDistributor_SGIR_TargetFilterList_Pos 24U /*!< GICDistributor SGIR: TargetFilterList Position */ +#define GICDistributor_SGIR_TargetFilterList_Msk (0x3U << GICDistributor_SGIR_TargetFilterList_Pos) /*!< GICDistributor SGIR: TargetFilterList Mask */ +#define GICDistributor_SGIR_TargetFilterList(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_TargetFilterList_Pos)) & GICDistributor_SGIR_TargetFilterList_Msk)
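Editorial sketch, not part of the patch: the SGIR field macros above can be combined to compose a raw GICD_SGIR write. SGI number 3 and a target list containing only CPU interface 0 are arbitrary example values; GIC_SendSGI(), defined further below in this header, wraps the same write.

  /* Illustrative only: raise SGI 3 on CPU interface 0 using the field macros. */
  __STATIC_INLINE void example_raise_sgi3_on_cpu0(void)
  {
    GICDistributor->SGIR = GICDistributor_SGIR_TargetFilterList(0U) |  /* 0 = use the CPUTargetList field */
                           GICDistributor_SGIR_CPUTargetList(0x01U) |  /* bit 0 = CPU interface 0 */
                           GICDistributor_SGIR_INTID(3U);              /* SGI number 3 */
  }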
+ +/* GICDistributor IROUTER Register */ +#define GICDistributor_IROUTER_Aff0_Pos 0UL /*!< GICDistributor IROUTER: Aff0 Position */ +#define GICDistributor_IROUTER_Aff0_Msk (0xFFUL /*<< GICDistributor_IROUTER_Aff0_Pos*/) /*!< GICDistributor IROUTER: Aff0 Mask */ +#define GICDistributor_IROUTER_Aff0(x) (((uint64_t)(((uint64_t)(x)) /*<< GICDistributor_IROUTER_Aff0_Pos*/)) & GICDistributor_IROUTER_Aff0_Msk) + +#define GICDistributor_IROUTER_Aff1_Pos 8UL /*!< GICDistributor IROUTER: Aff1 Position */ +#define GICDistributor_IROUTER_Aff1_Msk (0xFFUL << GICDistributor_IROUTER_Aff1_Pos) /*!< GICDistributor IROUTER: Aff1 Mask */ +#define GICDistributor_IROUTER_Aff1(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff1_Pos)) & GICDistributor_IROUTER_Aff1_Msk) + +#define GICDistributor_IROUTER_Aff2_Pos 16UL /*!< GICDistributor IROUTER: Aff2 Position */ +#define GICDistributor_IROUTER_Aff2_Msk (0xFFUL << GICDistributor_IROUTER_Aff2_Pos) /*!< GICDistributor IROUTER: Aff2 Mask */ +#define GICDistributor_IROUTER_Aff2(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff2_Pos)) & GICDistributor_IROUTER_Aff2_Msk) + +#define GICDistributor_IROUTER_IRM_Pos 31UL /*!< GICDistributor IROUTER: IRM Position */ +#define GICDistributor_IROUTER_IRM_Msk (0x1UL << GICDistributor_IROUTER_IRM_Pos) /*!< GICDistributor IROUTER: IRM Mask */ +#define GICDistributor_IROUTER_IRM(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_IRM_Pos)) & GICDistributor_IROUTER_IRM_Msk) + +#define GICDistributor_IROUTER_Aff3_Pos 32UL /*!< GICDistributor IROUTER: Aff3 Position */ +#define GICDistributor_IROUTER_Aff3_Msk (0xFFUL << GICDistributor_IROUTER_Aff3_Pos) /*!< GICDistributor IROUTER: Aff3 Mask */ +#define GICDistributor_IROUTER_Aff3(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff3_Pos)) & GICDistributor_IROUTER_Aff3_Msk) + + + +/** \brief Structure type to access the Generic Interrupt Controller Interface (GICC) +*/ +typedef struct +{ + __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) CPU Interface Control Register */ + __IOM uint32_t PMR; /*!< \brief Offset: 0x004 (R/W) Interrupt Priority Mask Register */ + __IOM uint32_t BPR; /*!< \brief Offset: 0x008 (R/W) Binary Point Register */ + __IM uint32_t IAR; /*!< \brief Offset: 0x00C (R/ ) Interrupt Acknowledge Register */ + __OM uint32_t EOIR; /*!< \brief Offset: 0x010 ( /W) End Of Interrupt Register */ + __IM uint32_t RPR; /*!< \brief Offset: 0x014 (R/ ) Running Priority Register */ + __IM uint32_t HPPIR; /*!< \brief Offset: 0x018 (R/ ) Highest Priority Pending Interrupt Register */ + __IOM uint32_t ABPR; /*!< \brief Offset: 0x01C (R/W) Aliased Binary Point Register */ + __IM uint32_t AIAR; /*!< \brief Offset: 0x020 (R/ ) Aliased Interrupt Acknowledge Register */ + __OM uint32_t AEOIR; /*!< \brief Offset: 0x024 ( /W) Aliased End Of Interrupt Register */ + __IM uint32_t AHPPIR; /*!< \brief Offset: 0x028 (R/ ) Aliased Highest Priority Pending Interrupt Register */ + __IOM uint32_t STATUSR; /*!< \brief Offset: 0x02C (R/W) Error Reporting Status Register, optional */ + RESERVED(1[40], uint32_t) + __IOM uint32_t APR[4]; /*!< \brief Offset: 0x0D0 (R/W) Active Priority Register */ + __IOM uint32_t NSAPR[4]; /*!< \brief Offset: 0x0E0 (R/W) Non-secure Active Priority Register */ + RESERVED(2[3], uint32_t) + __IM uint32_t IIDR; /*!< \brief Offset: 0x0FC (R/ ) CPU Interface Identification Register */ + RESERVED(3[960], uint32_t) + __OM uint32_t DIR; /*!< \brief Offset: 0x1000( /W) Deactivate Interrupt Register */ +} GICInterface_Type; + +#define GICInterface ((GICInterface_Type *) GIC_INTERFACE_BASE ) /*!< \brief GIC Interface register set access pointer */
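Editorial sketch, not part of the patch: the GICInterface pointer gives direct access to the CPU interface registers declared above; GIC_INTERFACE_BASE is expected to be supplied by the device-specific header that includes this file. A minimal read through the pointer might look like this.

  /* Illustrative only: read the running priority of the current CPU interface. */
  __STATIC_INLINE uint32_t example_read_running_priority(void)
  {
    return GICInterface->RPR;    /* 0xFF (idle priority) when no interrupt is active */
  }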
+ +/* GICInterface CTLR Register */ +#define GICInterface_CTLR_Enable_Pos 0U /*!< GICInterface CTLR: Enable Position */ +#define GICInterface_CTLR_Enable_Msk (0x1U /*<< GICInterface_CTLR_Enable_Pos*/) /*!< GICInterface CTLR: Enable Mask */ +#define GICInterface_CTLR_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_CTLR_Enable_Pos*/)) & GICInterface_CTLR_Enable_Msk) + +/* GICInterface PMR Register */ +#define GICInterface_PMR_Priority_Pos 0U /*!< GICInterface PMR: Priority Position */ +#define GICInterface_PMR_Priority_Msk (0xFFU /*<< GICInterface_PMR_Priority_Pos*/) /*!< GICInterface PMR: Priority Mask */ +#define GICInterface_PMR_Priority(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_PMR_Priority_Pos*/)) & GICInterface_PMR_Priority_Msk) + +/* GICInterface BPR Register */ +#define GICInterface_BPR_Binary_Point_Pos 0U /*!< GICInterface BPR: Binary_Point Position */ +#define GICInterface_BPR_Binary_Point_Msk (0x7U /*<< GICInterface_BPR_Binary_Point_Pos*/) /*!< GICInterface BPR: Binary_Point Mask */ +#define GICInterface_BPR_Binary_Point(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_BPR_Binary_Point_Pos*/)) & GICInterface_BPR_Binary_Point_Msk) + +/* GICInterface IAR Register */ +#define GICInterface_IAR_INTID_Pos 0U /*!< GICInterface IAR: INTID Position */ +#define GICInterface_IAR_INTID_Msk (0xFFFFFFU /*<< GICInterface_IAR_INTID_Pos*/) /*!< GICInterface IAR: INTID Mask */ +#define GICInterface_IAR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_IAR_INTID_Pos*/)) & GICInterface_IAR_INTID_Msk) + +/* GICInterface EOIR Register */ +#define GICInterface_EOIR_INTID_Pos 0U /*!< GICInterface EOIR: INTID Position */ +#define GICInterface_EOIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_EOIR_INTID_Pos*/) /*!< GICInterface EOIR: INTID Mask */ +#define GICInterface_EOIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_EOIR_INTID_Pos*/)) & GICInterface_EOIR_INTID_Msk) + +/* GICInterface RPR Register */ +#define GICInterface_RPR_INTID_Pos 0U /*!< GICInterface RPR: INTID Position */ +#define GICInterface_RPR_INTID_Msk (0xFFU /*<< GICInterface_RPR_INTID_Pos*/) /*!< GICInterface RPR: INTID Mask */ +#define GICInterface_RPR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_RPR_INTID_Pos*/)) & GICInterface_RPR_INTID_Msk) + +/* GICInterface HPPIR Register */ +#define GICInterface_HPPIR_INTID_Pos 0U /*!< GICInterface HPPIR: INTID Position */ +#define GICInterface_HPPIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_HPPIR_INTID_Pos*/) /*!< GICInterface HPPIR: INTID Mask */ +#define GICInterface_HPPIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_HPPIR_INTID_Pos*/)) & GICInterface_HPPIR_INTID_Msk) + +/* GICInterface ABPR Register */ +#define GICInterface_ABPR_Binary_Point_Pos 0U /*!< GICInterface ABPR: Binary_Point Position */ +#define GICInterface_ABPR_Binary_Point_Msk (0x7U /*<< GICInterface_ABPR_Binary_Point_Pos*/) /*!< GICInterface ABPR: Binary_Point Mask */ +#define GICInterface_ABPR_Binary_Point(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_ABPR_Binary_Point_Pos*/)) & GICInterface_ABPR_Binary_Point_Msk) + +/* GICInterface AIAR Register */ +#define GICInterface_AIAR_INTID_Pos 0U /*!< GICInterface AIAR: INTID Position */ +#define GICInterface_AIAR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AIAR_INTID_Pos*/) /*!< GICInterface AIAR: INTID Mask */ +#define GICInterface_AIAR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AIAR_INTID_Pos*/)) & GICInterface_AIAR_INTID_Msk) + +/* GICInterface AEOIR Register */ +#define GICInterface_AEOIR_INTID_Pos 0U /*!< GICInterface AEOIR: INTID Position */ +#define GICInterface_AEOIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AEOIR_INTID_Pos*/) /*!< GICInterface AEOIR: INTID Mask */ +#define GICInterface_AEOIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AEOIR_INTID_Pos*/)) & GICInterface_AEOIR_INTID_Msk)
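Editorial sketch, not part of the patch: the IAR/EOIR field macros above are typically used in the acknowledge/complete sequence of an interrupt handler. The dispatch step is left as a placeholder.

  /* Illustrative only: acknowledge and complete one interrupt on this CPU interface. */
  __STATIC_INLINE void example_dispatch_one_irq(void)
  {
    uint32_t iar = GICInterface->IAR;              /* acknowledge highest priority pending interrupt */
    if ((iar & 0x3FFU) != 1023U) {                 /* INTID 1023 = spurious, no completion required */
      /* ... dispatch on GICInterface_IAR_INTID(iar) here ... */
      GICInterface->EOIR = iar;                    /* complete it by writing back the value read from IAR */
    }
  }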
+ +/* GICInterface AHPPIR Register */ +#define GICInterface_AHPPIR_INTID_Pos 0U /*!< GICInterface AHPPIR: INTID Position */ +#define GICInterface_AHPPIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AHPPIR_INTID_Pos*/) /*!< GICInterface AHPPIR: INTID Mask */ +#define GICInterface_AHPPIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AHPPIR_INTID_Pos*/)) & GICInterface_AHPPIR_INTID_Msk) + +/* GICInterface STATUSR Register */ +#define GICInterface_STATUSR_RRD_Pos 0U /*!< GICInterface STATUSR: RRD Position */ +#define GICInterface_STATUSR_RRD_Msk (0x1U /*<< GICInterface_STATUSR_RRD_Pos*/) /*!< GICInterface STATUSR: RRD Mask */ +#define GICInterface_STATUSR_RRD(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_STATUSR_RRD_Pos*/)) & GICInterface_STATUSR_RRD_Msk) + +#define GICInterface_STATUSR_WRD_Pos 1U /*!< GICInterface STATUSR: WRD Position */ +#define GICInterface_STATUSR_WRD_Msk (0x1U << GICInterface_STATUSR_WRD_Pos) /*!< GICInterface STATUSR: WRD Mask */ +#define GICInterface_STATUSR_WRD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_WRD_Pos)) & GICInterface_STATUSR_WRD_Msk) + +#define GICInterface_STATUSR_RWOD_Pos 2U /*!< GICInterface STATUSR: RWOD Position */ +#define GICInterface_STATUSR_RWOD_Msk (0x1U << GICInterface_STATUSR_RWOD_Pos) /*!< GICInterface STATUSR: RWOD Mask */ +#define GICInterface_STATUSR_RWOD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_RWOD_Pos)) & GICInterface_STATUSR_RWOD_Msk) + +#define GICInterface_STATUSR_WROD_Pos 3U /*!< GICInterface STATUSR: WROD Position */ +#define GICInterface_STATUSR_WROD_Msk (0x1U << GICInterface_STATUSR_WROD_Pos) /*!< GICInterface STATUSR: WROD Mask */ +#define GICInterface_STATUSR_WROD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_WROD_Pos)) & GICInterface_STATUSR_WROD_Msk) + +#define GICInterface_STATUSR_ASV_Pos 4U /*!< GICInterface STATUSR: ASV Position */ +#define GICInterface_STATUSR_ASV_Msk (0x1U << GICInterface_STATUSR_ASV_Pos) /*!< GICInterface STATUSR: ASV Mask */ +#define GICInterface_STATUSR_ASV(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_ASV_Pos)) & GICInterface_STATUSR_ASV_Msk) + +/* GICInterface IIDR Register */ +#define GICInterface_IIDR_Implementer_Pos 0U /*!< GICInterface IIDR: Implementer Position */ +#define GICInterface_IIDR_Implementer_Msk (0xFFFU /*<< GICInterface_IIDR_Implementer_Pos*/) /*!< GICInterface IIDR: Implementer Mask */ +#define GICInterface_IIDR_Implementer(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_IIDR_Implementer_Pos*/)) & GICInterface_IIDR_Implementer_Msk) + +#define GICInterface_IIDR_Revision_Pos 12U /*!< GICInterface IIDR: Revision Position */ +#define GICInterface_IIDR_Revision_Msk (0xFU << GICInterface_IIDR_Revision_Pos) /*!< GICInterface IIDR: Revision Mask */ +#define GICInterface_IIDR_Revision(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_Revision_Pos)) & GICInterface_IIDR_Revision_Msk) + +#define GICInterface_IIDR_Arch_version_Pos 16U /*!< GICInterface IIDR: Arch_version Position */ +#define GICInterface_IIDR_Arch_version_Msk (0xFU << GICInterface_IIDR_Arch_version_Pos) /*!< GICInterface IIDR: Arch_version Mask */ +#define GICInterface_IIDR_Arch_version(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_Arch_version_Pos)) & GICInterface_IIDR_Arch_version_Msk) + +#define GICInterface_IIDR_ProductID_Pos 20U /*!< GICInterface IIDR: ProductID Position */ +#define GICInterface_IIDR_ProductID_Msk (0xFFFU << GICInterface_IIDR_ProductID_Pos) /*!< GICInterface IIDR: ProductID Mask */ +#define GICInterface_IIDR_ProductID(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_ProductID_Pos)) & GICInterface_IIDR_ProductID_Msk)
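Editorial sketch, not part of the patch: the *_Pos/*_Msk pairs above are shifted masks, so extracting a field from a register value means masking first and shifting down afterwards, as shown here for the CPU interface identification register.

  /* Illustrative only: decode the architecture version field of GICC_IIDR. */
  __STATIC_INLINE uint32_t example_gicc_arch_version(void)
  {
    uint32_t iidr = GICInterface->IIDR;
    return (iidr & GICInterface_IIDR_Arch_version_Msk) >> GICInterface_IIDR_Arch_version_Pos;
  }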
+ +/* GICInterface DIR Register */ +#define GICInterface_DIR_INTID_Pos 0U /*!< GICInterface DIR: INTID Position */ +#define GICInterface_DIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_DIR_INTID_Pos*/) /*!< GICInterface DIR: INTID Mask */ +#define GICInterface_DIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_DIR_INTID_Pos*/)) & GICInterface_DIR_INTID_Msk) + + + +/** \brief Enable the interrupt distributor using the GIC's CTLR register. +*/ +__STATIC_INLINE void GIC_EnableDistributor(void) +{ + GICDistributor->CTLR |= 1U; +} + +/** \brief Disable the interrupt distributor using the GIC's CTLR register. +*/ +__STATIC_INLINE void GIC_DisableDistributor(void) +{ + GICDistributor->CTLR &=~1U; +} + +/** \brief Read the GIC's TYPER register. +* \return GICDistributor_Type::TYPER +*/ +__STATIC_INLINE uint32_t GIC_DistributorInfo(void) +{ + return (GICDistributor->TYPER); +} + +/** \brief Reads the GIC's IIDR register. +* \return GICDistributor_Type::IIDR +*/ +__STATIC_INLINE uint32_t GIC_DistributorImplementer(void) +{ + return (GICDistributor->IIDR); +} + +/** \brief Sets the GIC's ITARGETSR register for the given interrupt. +* \param [in] IRQn Interrupt to be configured. +* \param [in] cpu_target CPU interfaces to assign this interrupt to. +*/ +__STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint32_t cpu_target) +{ + uint32_t mask = GICDistributor->ITARGETSR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U)); + GICDistributor->ITARGETSR[IRQn / 4U] = mask | ((cpu_target & 0xFFUL) << ((IRQn % 4U) * 8U)); +} + +/** \brief Read the GIC's ITARGETSR register. +* \param [in] IRQn Interrupt to acquire the configuration for. +* \return GICDistributor_Type::ITARGETSR +*/ +__STATIC_INLINE uint32_t GIC_GetTarget(IRQn_Type IRQn) +{ + return (GICDistributor->ITARGETSR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; +} + +/** \brief Enable the CPU's interrupt interface. +*/ +__STATIC_INLINE void GIC_EnableInterface(void) +{ + GICInterface->CTLR |= 1U; //enable interface +} + +/** \brief Disable the CPU's interrupt interface. +*/ +__STATIC_INLINE void GIC_DisableInterface(void) +{ + GICInterface->CTLR &=~1U; //disable interface +} + +/** \brief Read the CPU's IAR register. +* \return GICInterface_Type::IAR +*/ +__STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void) +{ + return (IRQn_Type)(GICInterface->IAR); +} + +/** \brief Writes the given interrupt number to the CPU's EOIR register. +* \param [in] IRQn The interrupt to be signaled as finished. +*/ +__STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn) +{ + GICInterface->EOIR = IRQn; +} + +/** \brief Enables the given interrupt using GIC's ISENABLER register. +* \param [in] IRQn The interrupt to be enabled. +*/ +__STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn) +{ + GICDistributor->ISENABLER[IRQn / 32U] = 1U << (IRQn % 32U); +} + +/** \brief Get interrupt enable status using GIC's ISENABLER register. +* \param [in] IRQn The interrupt to be queried. +* \return 0 - interrupt is not enabled, 1 - interrupt is enabled. +*/ +__STATIC_INLINE uint32_t GIC_GetEnableIRQ(IRQn_Type IRQn) +{ + return (GICDistributor->ISENABLER[IRQn / 32U] >> (IRQn % 32U)) & 1UL; +}
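Editorial sketch, not part of the patch: a minimal bring-up of one shared peripheral interrupt using the helpers defined above. INTID 42 is an arbitrary example; priority and trigger configuration helpers follow further below in this header.

  /* Illustrative only: route a shared peripheral interrupt to CPU 0 and enable it. */
  __STATIC_INLINE void example_enable_spi42_on_cpu0(void)
  {
    GIC_SetTarget((IRQn_Type)42, 1U);    /* forward INTID 42 to CPU interface 0 */
    GIC_EnableIRQ((IRQn_Type)42);        /* set the enable bit in ISENABLER */
  }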
+ +/** \brief Disables the given interrupt using GIC's ICENABLER register. +* \param [in] IRQn The interrupt to be disabled. +*/ +__STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn) +{ + GICDistributor->ICENABLER[IRQn / 32U] = 1U << (IRQn % 32U); +} + +/** \brief Get interrupt pending status from GIC's ISPENDR register. +* \param [in] IRQn The interrupt to be queried. +* \return 0 - interrupt is not pending, 1 - interrupt is pending. +*/ +__STATIC_INLINE uint32_t GIC_GetPendingIRQ(IRQn_Type IRQn) +{ + uint32_t pend; + + if (IRQn >= 16U) { + pend = (GICDistributor->ISPENDR[IRQn / 32U] >> (IRQn % 32U)) & 1UL; + } else { + // INTID 0-15 Software Generated Interrupt + pend = (GICDistributor->SPENDSGIR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; + // No CPU identification offered + if (pend != 0U) { + pend = 1U; + } else { + pend = 0U; + } + } + + return (pend); +} + +/** \brief Sets the given interrupt as pending using GIC's ISPENDR register. +* \param [in] IRQn The interrupt to be set pending. +*/ +__STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if (IRQn >= 16U) { + GICDistributor->ISPENDR[IRQn / 32U] = 1U << (IRQn % 32U); + } else { + // INTID 0-15 Software Generated Interrupt + // Forward the interrupt to the CPU interface that requested it + GICDistributor->SGIR = (IRQn | 0x02000000U); + } +} + +/** \brief Clears the given interrupt from being pending using GIC's ICPENDR register. +* \param [in] IRQn The interrupt to be cleared. +*/ +__STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if (IRQn >= 16U) { + GICDistributor->ICPENDR[IRQn / 32U] = 1U << (IRQn % 32U); + } else { + // INTID 0-15 Software Generated Interrupt + GICDistributor->CPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U); + } +} + +/** \brief Sets the interrupt configuration using GIC's ICFGR register. +* \param [in] IRQn The interrupt to be configured. +* \param [in] int_config Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1) +* Bit 1: 0 - level sensitive, 1 - edge triggered +*/ +__STATIC_INLINE void GIC_SetConfiguration(IRQn_Type IRQn, uint32_t int_config) +{ + uint32_t icfgr = GICDistributor->ICFGR[IRQn / 16U]; /* read current register content */ + uint32_t shift = (IRQn % 16U) << 1U; /* calculate shift value */ + + int_config &= 3U; /* only 2 bits are valid */ + icfgr &= (~(3U << shift)); /* clear bits to change */ + icfgr |= ( int_config << shift); /* set new configuration */ + + GICDistributor->ICFGR[IRQn / 16U] = icfgr; /* write new register content */ +} + +/** \brief Get the interrupt configuration from the GIC's ICFGR register. +* \param [in] IRQn Interrupt to acquire the configuration for. +* \return Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1) +* Bit 1: 0 - level sensitive, 1 - edge triggered +*/ +__STATIC_INLINE uint32_t GIC_GetConfiguration(IRQn_Type IRQn) +{ + return ((GICDistributor->ICFGR[IRQn / 16U] >> ((IRQn % 16U) << 1U)) & 3UL); +} + +/** \brief Set the priority for the given interrupt in the GIC's IPRIORITYR register. +* \param [in] IRQn The interrupt to be configured. +* \param [in] priority The priority for the interrupt, lower values denote higher priorities. +*/ +__STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + uint32_t mask = GICDistributor->IPRIORITYR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U)); + GICDistributor->IPRIORITYR[IRQn / 4U] = mask | ((priority & 0xFFUL) << ((IRQn % 4U) * 8U)); +}
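Editorial sketch, not part of the patch: the number of implemented priority bits is implementation defined. Writing 0xFF to a priority field and reading it back (the same probe GIC_DistInit() performs further below) reveals which bits stick; unimplemented low-order bits read as zero. GIC_GetPriority() is defined immediately after this note.

  /* Illustrative only: probe the implemented priority bits via INTID 0. */
  __STATIC_INLINE uint32_t example_probe_priority_bits(void)
  {
    GIC_SetPriority((IRQn_Type)0U, 0xFFU);    /* write all ones to the priority field */
    return GIC_GetPriority((IRQn_Type)0U);    /* e.g. 0xF8 when 5 priority bits are implemented */
  }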
+ +/** \brief Read the current interrupt priority from GIC's IPRIORITYR register. +* \param [in] IRQn The interrupt to be queried. +* \return GICDistributor_Type::IPRIORITYR +*/ +__STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn) +{ + return (GICDistributor->IPRIORITYR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; +} + +/** \brief Set the interrupt priority mask using CPU's PMR register. +* \param [in] priority Priority mask to be set. +*/ +__STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority) +{ + GICInterface->PMR = priority & 0xFFUL; //set priority mask +} + +/** \brief Read the current interrupt priority mask from CPU's PMR register. +* \return GICInterface_Type::PMR +*/ +__STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void) +{ + return GICInterface->PMR; +} + +/** \brief Configures the group priority and subpriority split point using CPU's BPR register. +* \param [in] binary_point Amount of bits used as subpriority. +*/ +__STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point) +{ + GICInterface->BPR = binary_point & 7U; //set binary point +} + +/** \brief Read the current group priority and subpriority split point from CPU's BPR register. +* \return GICInterface_Type::BPR +*/ +__STATIC_INLINE uint32_t GIC_GetBinaryPoint(void) +{ + return GICInterface->BPR; +} + +/** \brief Get the status for a given interrupt. +* \param [in] IRQn The interrupt to get status for. +* \return 0 - not pending/active, 1 - pending, 2 - active, 3 - pending and active +*/ +__STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn) +{ + uint32_t pending, active; + + active = ((GICDistributor->ISACTIVER[IRQn / 32U]) >> (IRQn % 32U)) & 1UL; + pending = ((GICDistributor->ISPENDR[IRQn / 32U]) >> (IRQn % 32U)) & 1UL; + + return ((active<<1U) | pending); +} + +/** \brief Generate a software interrupt using GIC's SGIR register. +* \param [in] IRQn Software interrupt to be generated. +* \param [in] target_list List of CPUs the software interrupt should be forwarded to. +* \param [in] filter_list Filter to be applied to determine interrupt receivers. +*/ +__STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint32_t target_list, uint32_t filter_list) +{ + GICDistributor->SGIR = ((filter_list & 3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (IRQn & 0x0FUL); +} + +/** \brief Get the interrupt number of the highest interrupt pending from CPU's HPPIR register. +* \return GICInterface_Type::HPPIR +*/ +__STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void) +{ + return GICInterface->HPPIR; +} + +/** \brief Provides information about the implementer and revision of the CPU interface. +* \return GICInterface_Type::IIDR +*/ +__STATIC_INLINE uint32_t GIC_GetInterfaceId(void) +{ + return GICInterface->IIDR; +} + +/** \brief Set the interrupt group in the GIC's IGROUPR register. +* \param [in] IRQn The interrupt to be configured. +* \param [in] group Interrupt group number: 0 - Group 0, 1 - Group 1 +*/ +__STATIC_INLINE void GIC_SetGroup(IRQn_Type IRQn, uint32_t group) +{ + uint32_t igroupr = GICDistributor->IGROUPR[IRQn / 32U]; + uint32_t shift = (IRQn % 32U); + + igroupr &= (~(1U << shift)); + igroupr |= ( (group & 1U) << shift); + + GICDistributor->IGROUPR[IRQn / 32U] = igroupr; +} +#define GIC_SetSecurity GIC_SetGroup + +/** \brief Get the interrupt group from the GIC's IGROUPR register. +* \param [in] IRQn The interrupt to be queried. +* \return 0 - Group 0, 1 - Group 1 +*/ +__STATIC_INLINE uint32_t GIC_GetGroup(IRQn_Type IRQn) +{ + return (GICDistributor->IGROUPR[IRQn / 32U] >> (IRQn % 32U)) & 1UL; +} +#define GIC_GetSecurity GIC_GetGroup
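Editorial sketch, not part of the patch: on GICs with the security extension, Group 0 interrupts are commonly signaled as FIQ and Group 1 as IRQ, depending on how the CPU interface is configured. INTID 34 below is an arbitrary example value.

  /* Illustrative only: assign a shared peripheral interrupt to Group 1 and enable it. */
  __STATIC_INLINE void example_route_spi34_to_group1(void)
  {
    GIC_SetGroup((IRQn_Type)34, 1U);     /* make INTID 34 a Group 1 interrupt */
    GIC_EnableIRQ((IRQn_Type)34);        /* enable it in the distributor */
  }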
+ +/** \brief Initialize the interrupt distributor. +*/ +__STATIC_INLINE void GIC_DistInit(void) +{ + uint32_t i; + uint32_t num_irq = 0U; + uint32_t priority_field; + + //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0, + //configuring all of the interrupts as Secure. + + //Disable interrupt forwarding + GIC_DisableDistributor(); + //Get the maximum number of interrupts that the GIC supports + num_irq = 32U * ((GIC_DistributorInfo() & 0x1FU) + 1U); + + /* Priority level is implementation defined. + To determine the number of priority bits implemented write 0xFF to an IPRIORITYR + priority field and read back the value stored.*/ + GIC_SetPriority((IRQn_Type)0U, 0xFFU); + priority_field = GIC_GetPriority((IRQn_Type)0U); + + for (i = 32U; i < num_irq; i++) + { + //Disable the SPI interrupt + GIC_DisableIRQ((IRQn_Type)i); + //Set level-sensitive (and N-N model) + GIC_SetConfiguration((IRQn_Type)i, 0U); + //Set priority + GIC_SetPriority((IRQn_Type)i, priority_field/2U); + //Set target list to CPU0 + GIC_SetTarget((IRQn_Type)i, 1U); + } + //Enable distributor + GIC_EnableDistributor(); +} + +/** \brief Initialize the CPU's interrupt interface +*/ +__STATIC_INLINE void GIC_CPUInterfaceInit(void) +{ + uint32_t i; + uint32_t priority_field; + + //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0, + //configuring all of the interrupts as Secure. + + //Disable the CPU interface + GIC_DisableInterface(); + + /* Priority level is implementation defined. + To determine the number of priority bits implemented write 0xFF to an IPRIORITYR + priority field and read back the value stored.*/ + GIC_SetPriority((IRQn_Type)0U, 0xFFU); + priority_field = GIC_GetPriority((IRQn_Type)0U); + + //SGI and PPI + for (i = 0U; i < 32U; i++) + { + if(i > 15U) { + //Set level-sensitive (and N-N model) for PPI + GIC_SetConfiguration((IRQn_Type)i, 0U); + } + //Disable SGI and PPI interrupts + GIC_DisableIRQ((IRQn_Type)i); + //Set priority + GIC_SetPriority((IRQn_Type)i, priority_field/2U); + } + //Enable interface + GIC_EnableInterface(); + //Set binary point to 0 + GIC_SetBinaryPoint(0U); + //Set priority mask + GIC_SetInterfacePriorityMask(0xFFU); +} + +/** \brief Initialize and enable the GIC +*/ +__STATIC_INLINE void GIC_Enable(void) +{ + GIC_DistInit(); + GIC_CPUInterfaceInit(); //per CPU +} + +#endif /* ARM_GIC_V20_H */ \ No newline at end of file diff --git a/CMSIS/Core/Include/a-profile/irq_ctrl.h b/CMSIS/Core/Include/a-profile/irq_ctrl.h index 1ca29a27e..b61991f62 100644 --- a/CMSIS/Core/Include/a-profile/irq_ctrl.h +++ b/CMSIS/Core/Include/a-profile/irq_ctrl.h @@ -5,7 +5,7 @@ * @date 03. March 2020 ******************************************************************************/ /* - * Copyright (c) 2017-2020 ARM Limited. All rights reserved. + * Copyright (c) 2017-2023 ARM Limited. All rights reserved.
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/CMSIS/Core/Include/cmsis_compiler.h b/CMSIS/Core/Include/cmsis_compiler.h index 82c90b32e..1528115e7 100644 --- a/CMSIS/Core/Include/cmsis_compiler.h +++ b/CMSIS/Core/Include/cmsis_compiler.h @@ -31,74 +31,34 @@ * Arm Compiler above 6.10.1 (armclang) */ #if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100) - #if __ARM_ARCH_PROFILE == 'A' - #include "./a-profile/cmsis_armclang_a.h" - #elif __ARM_ARCH_PROFILE == 'R' - #include "./r-profile/cmsis_armclang_r.h" - #elif __ARM_ARCH_PROFILE == 'M' - #include "./m-profile/cmsis_armclang_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_armclang.h" /* * TI Arm Clang Compiler (tiarmclang) */ #elif defined (__ti__) - #if __ARM_ARCH_PROFILE == 'A' - #error "Core-A is not supported for this compiler" - #elif __ARM_ARCH_PROFILE == 'R' - #error "Core-R is not supported for this compiler" - #elif __ARM_ARCH_PROFILE == 'M' - #include "m-profile/cmsis_tiarmclang_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_tiarmclang.h" /* * LLVM/Clang Compiler */ #elif defined ( __clang__ ) - #if __ARM_ARCH_PROFILE == 'A' - #include "a-profile/cmsis_clang_a.h" - #elif __ARM_ARCH_PROFILE == 'R' - #include "r-profile/cmsis_clang_r.h" - #elif __ARM_ARCH_PROFILE == 'M' - #include "m-profile/cmsis_clang_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_clang.h" /* * GNU Compiler */ #elif defined ( __GNUC__ ) - #if __ARM_ARCH_PROFILE == 'A' - #include "a-profile/cmsis_gcc_a.h" - #elif __ARM_ARCH_PROFILE == 'R' - #include "r-profile/cmsis_gcc_r.h" - #elif __ARM_ARCH_PROFILE == 'M' - #include "m-profile/cmsis_gcc_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_gcc.h" /* * IAR Compiler */ #elif defined ( __ICCARM__ ) - #if __ARM_ARCH_PROFILE == 'A' - #include "a-profile/cmsis_iccarm_a.h" - #elif __ARM_ARCH_PROFILE == 'R' - #include "r-profile/cmsis_iccarm_r.h" - #elif __ARM_ARCH_PROFILE == 'M' - #include "m-profile/cmsis_iccarm_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_iccarm.h" /* @@ -122,6 +82,9 @@ #ifndef __NO_RETURN #define __NO_RETURN __attribute__((noreturn)) #endif + #ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) + #endif #ifndef __USED #define __USED __attribute__((used)) #endif @@ -174,6 +137,7 @@ #define __ALIAS(x) __attribute__ ((alias(x))) #endif + /* * TASKING Compiler */ @@ -199,6 +163,9 @@ #ifndef __NO_RETURN #define __NO_RETURN __attribute__((noreturn)) #endif + #ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) + #endif #ifndef __USED #define __USED __attribute__((used)) #endif @@ -252,6 +219,7 @@ #define __ALIAS(x) __attribute__ ((alias(x))) #endif + /* * COSMIC Compiler */ @@ -278,6 +246,10 @@ #warning No compiler specific solution for __USED. __USED is ignored. #define __USED #endif + #ifndef CMSIS_DEPRECATED + #warning No compiler specific solution for CMSIS_DEPRECATED. CMSIS_DEPRECATED is ignored. 
+ #define CMSIS_DEPRECATED + #endif #ifndef __WEAK #define __WEAK __weak #endif diff --git a/CMSIS/Core/Include/cmsis_version.h b/CMSIS/Core/Include/cmsis_version.h index 8b4765f18..90529147c 100644 --- a/CMSIS/Core/Include/cmsis_version.h +++ b/CMSIS/Core/Include/cmsis_version.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_version.h - * @brief CMSIS Core(M) Version definitions - * @version V5.0.5 - * @date 02. February 2022 + * @brief CMSIS Core Version definitions + * @version V6.0.0 + * @date 2. July 2023 ******************************************************************************/ /* - * Copyright (c) 2009-2022 ARM Limited. All rights reserved. + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -32,8 +32,24 @@ #define __CMSIS_VERSION_H /* CMSIS Version definitions */ -#define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */ -#define __CM_CMSIS_VERSION_SUB ( 6U) /*!< [15:0] CMSIS Core(M) sub version */ -#define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ - __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */ -#endif +#define __CMSIS_VERSION_MAIN ( 6U) /*!< \brief [31:16] CMSIS-Core(A/R/M) main version */ +#define __CMSIS_VERSION_SUB ( 0U) /*!< \brief [15:0] CMSIS-Core(A/R/M) sub version */ +#define __CMSIS_VERSION ((__CMSIS_VERSION_MAIN << 16U) | \ + __CMSIS_VERSION_SUB ) /*!< \brief CMSIS-Core(A/R/M) version number */ + +#define __CA_CMSIS_VERSION_MAIN (6U) /*!< \brief [31:16] CMSIS-Core(A) main version */ +#define __CA_CMSIS_VERSION_SUB (0U) /*!< \brief [15:0] CMSIS-Core(A) sub version */ +#define __CA_CMSIS_VERSION ((__CA_CMSIS_VERSION_MAIN << 16U) | \ + __CA_CMSIS_VERSION_SUB ) /*!< \brief CMSIS-Core(A) version number */ + +#define __CR_CMSIS_VERSION_MAIN (6U) /*!< \brief [31:16] CMSIS-Core(R) main version */ +#define __CR_CMSIS_VERSION_SUB (0U) /*!< \brief [15:0] CMSIS-Core(R) sub version */ +#define __CR_CMSIS_VERSION ((__CR_CMSIS_VERSION_MAIN << 16U) | \ + __CR_CMSIS_VERSION_SUB ) /*!< \brief CMSIS-Core(R) version number */ + +#define __CM_CMSIS_VERSION_MAIN ( 6U) /*!< \brief [31:16] CMSIS-Core(M) main version */ +#define __CM_CMSIS_VERSION_SUB ( 0U) /*!< \brief [15:0] CMSIS-Core(M) sub version */ +#define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ + __CM_CMSIS_VERSION_SUB ) /*!< \brief CMSIS-Core(M) version number */ + +#endif \ No newline at end of file diff --git a/CMSIS/Core/Include/core_ca.h b/CMSIS/Core/Include/core_ca.h index 12a1a6d73..c8e3288ed 100644 --- a/CMSIS/Core/Include/core_ca.h +++ b/CMSIS/Core/Include/core_ca.h @@ -1,11 +1,11 @@ /**************************************************************************//** - * @file core_ca.h - * @brief CMSIS Cortex-A Core Peripheral Access Layer Header File - * @version V1.0.8 - * @date 23. March 2023 + * @file core_ca9.h + * @brief CMSIS Cortex-A9 Core Peripheral Access Layer Header File + * @version V1.0.0 + * @date 2. July 2023 ******************************************************************************/ /* - * Copyright (c) 2009-2022 ARM Limited. All rights reserved. + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -21,2920 +21,16 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ + /* + * This file exists for compatibility reasons only + */ -#if defined ( __ICCARM__ ) - #pragma system_include /* treat file as system include file for MISRA check */ -#elif defined (__clang__) - #pragma clang system_header /* treat file as system include file */ -#endif -#ifndef __CORE_CA_H_GENERIC -#define __CORE_CA_H_GENERIC +#ifndef __CORE_CA_H +#define __CORE_CA_H -#ifdef __cplusplus - extern "C" { -#endif -/******************************************************************************* - * CMSIS definitions - ******************************************************************************/ - -/* CMSIS CA definitions */ -#define __CA_CMSIS_VERSION_MAIN (1U) /*!< \brief [31:16] CMSIS-Core(A) main version */ -#define __CA_CMSIS_VERSION_SUB (1U) /*!< \brief [15:0] CMSIS-Core(A) sub version */ -#define __CA_CMSIS_VERSION ((__CA_CMSIS_VERSION_MAIN << 16U) | \ - __CA_CMSIS_VERSION_SUB ) /*!< \brief CMSIS-Core(A) version number */ - -#if defined ( __CC_ARM ) - #if defined (__TARGET_FPU_VFP) - #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) - #define __FPU_USED 1U - #else - #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #define __FPU_USED 0U - #endif - #else - #define __FPU_USED 0U - #endif - -#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined (__ARM_FP) - #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) - #define __FPU_USED 1U - #else - #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #define __FPU_USED 0U - #endif - #else - #define __FPU_USED 0U - #endif - -#elif defined ( __ICCARM__ ) - #if defined (__ARMVFP__) - #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) - #define __FPU_USED 1U - #else - #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #define __FPU_USED 0U - #endif - #else - #define __FPU_USED 0U - #endif - -#elif defined ( __TMS470__ ) - #if defined __TI_VFP_SUPPORT__ - #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) - #define __FPU_USED 1U - #else - #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #define __FPU_USED 0U - #endif - #else - #define __FPU_USED 0U - #endif - -#elif defined ( __GNUC__ ) - #if defined (__VFP_FP__) && !defined(__SOFTFP__) - #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) - #define __FPU_USED 1U - #else - #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #define __FPU_USED 0U - #endif - #else - #define __FPU_USED 0U - #endif - -#elif defined ( __TASKING__ ) - #if defined (__FPU_VFP__) - #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) - #define __FPU_USED 1U - #else - #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" - #define __FPU_USED 0U - #endif - #else - #define __FPU_USED 0U - #endif -#endif - -#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ - -#ifdef __cplusplus -} -#endif - -#endif /* __CORE_CA_H_GENERIC */ - -#ifndef __CMSIS_GENERIC - -#ifndef __CORE_CA_H_DEPENDANT -#define __CORE_CA_H_DEPENDANT - -#ifdef __cplusplus - extern "C" { -#endif - - /* check device defines and use defaults */ -#if defined __CHECK_DEVICE_DEFINES - #ifndef __CA_REV - #define __CA_REV 0x0000U - #warning "__CA_REV not defined in device header file; using default!" - #endif - - #ifndef __FPU_PRESENT - #define __FPU_PRESENT 0U - #warning "__FPU_PRESENT not defined in device header file; using default!" 
- #endif - - #ifndef __GIC_PRESENT - #define __GIC_PRESENT 1U - #warning "__GIC_PRESENT not defined in device header file; using default!" - #endif - - #ifndef __TIM_PRESENT - #define __TIM_PRESENT 1U - #warning "__TIM_PRESENT not defined in device header file; using default!" - #endif - - #ifndef __L2C_PRESENT - #define __L2C_PRESENT 0U - #warning "__L2C_PRESENT not defined in device header file; using default!" - #endif -#endif - -/* IO definitions (access restrictions to peripheral registers) */ -#ifdef __cplusplus - #define __I volatile /*!< \brief Defines 'read only' permissions */ -#else - #define __I volatile const /*!< \brief Defines 'read only' permissions */ -#endif -#define __O volatile /*!< \brief Defines 'write only' permissions */ -#define __IO volatile /*!< \brief Defines 'read / write' permissions */ - -/* following defines should be used for structure members */ -#define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */ -#define __OM volatile /*!< \brief Defines 'write only' structure member permissions */ -#define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */ -#define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas - - /******************************************************************************* - * Register Abstraction - Core Register contain: - - CPSR - - CP15 Registers - - L2C-310 Cache Controller - - Generic Interrupt Controller Distributor - - Generic Interrupt Controller Interface - ******************************************************************************/ - -/* Core Register CPSR */ -typedef union -{ - struct - { - uint32_t M:5; /*!< \brief bit: 0.. 4 Mode field */ - uint32_t T:1; /*!< \brief bit: 5 Thumb execution state bit */ - uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */ - uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */ - uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */ - uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */ - uint32_t IT1:6; /*!< \brief bit: 10..15 If-Then execution state bits 2-7 */ - uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */ - RESERVED(0:4, uint32_t) - uint32_t J:1; /*!< \brief bit: 24 Jazelle bit */ - uint32_t IT0:2; /*!< \brief bit: 25..26 If-Then execution state bits 0-1 */ - uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */ - uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */ - uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */ - uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */ - uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */ - } b; /*!< \brief Structure used for bit access */ - uint32_t w; /*!< \brief Type used for word access */ -} CPSR_Type; - - - -/* CPSR Register Definitions */ -#define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */ -#define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */ - -#define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */ -#define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */ - -#define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */ -#define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */ - -#define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */ -#define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */ - -#define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */ -#define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */ - -#define CPSR_IT0_Pos 25U /*!< \brief CPSR: IT0 Position */ -#define CPSR_IT0_Msk (3UL << 
CPSR_IT0_Pos) /*!< \brief CPSR: IT0 Mask */ - -#define CPSR_J_Pos 24U /*!< \brief CPSR: J Position */ -#define CPSR_J_Msk (1UL << CPSR_J_Pos) /*!< \brief CPSR: J Mask */ - -#define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */ -#define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */ - -#define CPSR_IT1_Pos 10U /*!< \brief CPSR: IT1 Position */ -#define CPSR_IT1_Msk (0x3FUL << CPSR_IT1_Pos) /*!< \brief CPSR: IT1 Mask */ - -#define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */ -#define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */ - -#define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */ -#define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */ - -#define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */ -#define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */ - -#define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */ -#define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */ - -#define CPSR_T_Pos 5U /*!< \brief CPSR: T Position */ -#define CPSR_T_Msk (1UL << CPSR_T_Pos) /*!< \brief CPSR: T Mask */ - -#define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */ -#define CPSR_M_Msk (0x1FUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */ - -#define CPSR_M_USR 0x10U /*!< \brief CPSR: M User mode (PL0) */ -#define CPSR_M_FIQ 0x11U /*!< \brief CPSR: M Fast Interrupt mode (PL1) */ -#define CPSR_M_IRQ 0x12U /*!< \brief CPSR: M Interrupt mode (PL1) */ -#define CPSR_M_SVC 0x13U /*!< \brief CPSR: M Supervisor mode (PL1) */ -#define CPSR_M_MON 0x16U /*!< \brief CPSR: M Monitor mode (PL1) */ -#define CPSR_M_ABT 0x17U /*!< \brief CPSR: M Abort mode (PL1) */ -#define CPSR_M_HYP 0x1AU /*!< \brief CPSR: M Hypervisor mode (PL2) */ -#define CPSR_M_UND 0x1BU /*!< \brief CPSR: M Undefined mode (PL1) */ -#define CPSR_M_SYS 0x1FU /*!< \brief CPSR: M System mode (PL1) */ - -/* CP15 Register SCTLR */ -typedef union -{ - struct - { - uint32_t M:1; /*!< \brief bit: 0 MMU enable */ - uint32_t A:1; /*!< \brief bit: 1 Alignment check enable */ - uint32_t C:1; /*!< \brief bit: 2 Cache enable */ - RESERVED(0:2, uint32_t) - uint32_t CP15BEN:1; /*!< \brief bit: 5 CP15 barrier enable */ - RESERVED(1:1, uint32_t) - uint32_t B:1; /*!< \brief bit: 7 Endianness model */ - RESERVED(2:2, uint32_t) - uint32_t SW:1; /*!< \brief bit: 10 SWP and SWPB enable */ - uint32_t Z:1; /*!< \brief bit: 11 Branch prediction enable */ - uint32_t I:1; /*!< \brief bit: 12 Instruction cache enable */ - uint32_t V:1; /*!< \brief bit: 13 Vectors bit */ - uint32_t RR:1; /*!< \brief bit: 14 Round Robin select */ - RESERVED(3:2, uint32_t) - uint32_t HA:1; /*!< \brief bit: 17 Hardware Access flag enable */ - RESERVED(4:1, uint32_t) - uint32_t WXN:1; /*!< \brief bit: 19 Write permission implies XN */ - uint32_t UWXN:1; /*!< \brief bit: 20 Unprivileged write permission implies PL1 XN */ - uint32_t FI:1; /*!< \brief bit: 21 Fast interrupts configuration enable */ - uint32_t U:1; /*!< \brief bit: 22 Alignment model */ - RESERVED(5:1, uint32_t) - uint32_t VE:1; /*!< \brief bit: 24 Interrupt Vectors Enable */ - uint32_t EE:1; /*!< \brief bit: 25 Exception Endianness */ - RESERVED(6:1, uint32_t) - uint32_t NMFI:1; /*!< \brief bit: 27 Non-maskable FIQ (NMFI) support */ - uint32_t TRE:1; /*!< \brief bit: 28 TEX remap enable. 
*/ - uint32_t AFE:1; /*!< \brief bit: 29 Access flag enable */ - uint32_t TE:1; /*!< \brief bit: 30 Thumb Exception enable */ - RESERVED(7:1, uint32_t) - } b; /*!< \brief Structure used for bit access */ - uint32_t w; /*!< \brief Type used for word access */ -} SCTLR_Type; - -#define SCTLR_TE_Pos 30U /*!< \brief SCTLR: TE Position */ -#define SCTLR_TE_Msk (1UL << SCTLR_TE_Pos) /*!< \brief SCTLR: TE Mask */ - -#define SCTLR_AFE_Pos 29U /*!< \brief SCTLR: AFE Position */ -#define SCTLR_AFE_Msk (1UL << SCTLR_AFE_Pos) /*!< \brief SCTLR: AFE Mask */ - -#define SCTLR_TRE_Pos 28U /*!< \brief SCTLR: TRE Position */ -#define SCTLR_TRE_Msk (1UL << SCTLR_TRE_Pos) /*!< \brief SCTLR: TRE Mask */ - -#define SCTLR_NMFI_Pos 27U /*!< \brief SCTLR: NMFI Position */ -#define SCTLR_NMFI_Msk (1UL << SCTLR_NMFI_Pos) /*!< \brief SCTLR: NMFI Mask */ - -#define SCTLR_EE_Pos 25U /*!< \brief SCTLR: EE Position */ -#define SCTLR_EE_Msk (1UL << SCTLR_EE_Pos) /*!< \brief SCTLR: EE Mask */ - -#define SCTLR_VE_Pos 24U /*!< \brief SCTLR: VE Position */ -#define SCTLR_VE_Msk (1UL << SCTLR_VE_Pos) /*!< \brief SCTLR: VE Mask */ - -#define SCTLR_U_Pos 22U /*!< \brief SCTLR: U Position */ -#define SCTLR_U_Msk (1UL << SCTLR_U_Pos) /*!< \brief SCTLR: U Mask */ - -#define SCTLR_FI_Pos 21U /*!< \brief SCTLR: FI Position */ -#define SCTLR_FI_Msk (1UL << SCTLR_FI_Pos) /*!< \brief SCTLR: FI Mask */ - -#define SCTLR_UWXN_Pos 20U /*!< \brief SCTLR: UWXN Position */ -#define SCTLR_UWXN_Msk (1UL << SCTLR_UWXN_Pos) /*!< \brief SCTLR: UWXN Mask */ - -#define SCTLR_WXN_Pos 19U /*!< \brief SCTLR: WXN Position */ -#define SCTLR_WXN_Msk (1UL << SCTLR_WXN_Pos) /*!< \brief SCTLR: WXN Mask */ - -#define SCTLR_HA_Pos 17U /*!< \brief SCTLR: HA Position */ -#define SCTLR_HA_Msk (1UL << SCTLR_HA_Pos) /*!< \brief SCTLR: HA Mask */ - -#define SCTLR_RR_Pos 14U /*!< \brief SCTLR: RR Position */ -#define SCTLR_RR_Msk (1UL << SCTLR_RR_Pos) /*!< \brief SCTLR: RR Mask */ - -#define SCTLR_V_Pos 13U /*!< \brief SCTLR: V Position */ -#define SCTLR_V_Msk (1UL << SCTLR_V_Pos) /*!< \brief SCTLR: V Mask */ - -#define SCTLR_I_Pos 12U /*!< \brief SCTLR: I Position */ -#define SCTLR_I_Msk (1UL << SCTLR_I_Pos) /*!< \brief SCTLR: I Mask */ - -#define SCTLR_Z_Pos 11U /*!< \brief SCTLR: Z Position */ -#define SCTLR_Z_Msk (1UL << SCTLR_Z_Pos) /*!< \brief SCTLR: Z Mask */ - -#define SCTLR_SW_Pos 10U /*!< \brief SCTLR: SW Position */ -#define SCTLR_SW_Msk (1UL << SCTLR_SW_Pos) /*!< \brief SCTLR: SW Mask */ - -#define SCTLR_B_Pos 7U /*!< \brief SCTLR: B Position */ -#define SCTLR_B_Msk (1UL << SCTLR_B_Pos) /*!< \brief SCTLR: B Mask */ - -#define SCTLR_CP15BEN_Pos 5U /*!< \brief SCTLR: CP15BEN Position */ -#define SCTLR_CP15BEN_Msk (1UL << SCTLR_CP15BEN_Pos) /*!< \brief SCTLR: CP15BEN Mask */ - -#define SCTLR_C_Pos 2U /*!< \brief SCTLR: C Position */ -#define SCTLR_C_Msk (1UL << SCTLR_C_Pos) /*!< \brief SCTLR: C Mask */ - -#define SCTLR_A_Pos 1U /*!< \brief SCTLR: A Position */ -#define SCTLR_A_Msk (1UL << SCTLR_A_Pos) /*!< \brief SCTLR: A Mask */ - -#define SCTLR_M_Pos 0U /*!< \brief SCTLR: M Position */ -#define SCTLR_M_Msk (1UL << SCTLR_M_Pos) /*!< \brief SCTLR: M Mask */ - -/* CP15 Register ACTLR */ -typedef union -{ -#if __CORTEX_A == 5 || defined(DOXYGEN) - /** \brief Structure used for bit access on Cortex-A5 */ - struct - { - uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */ - RESERVED(0:5, uint32_t) - uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ - uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache 
control */ - RESERVED(1:2, uint32_t) - uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */ - uint32_t DWBST:1; /*!< \brief bit: 11 AXI data write bursts to Normal memory */ - uint32_t RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */ - uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */ - uint32_t BP:2; /*!< \brief bit:16..15 Branch prediction policy */ - uint32_t RSDIS:1; /*!< \brief bit: 17 Disable return stack operation */ - uint32_t BTDIS:1; /*!< \brief bit: 18 Disable indirect Branch Target Address Cache (BTAC) */ - RESERVED(3:9, uint32_t) - uint32_t DBDI:1; /*!< \brief bit: 28 Disable branch dual issue */ - RESERVED(7:3, uint32_t) - } b; -#endif -#if __CORTEX_A == 7 || defined(DOXYGEN) - /** \brief Structure used for bit access on Cortex-A7 */ - struct - { - RESERVED(0:6, uint32_t) - uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ - RESERVED(1:3, uint32_t) - uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */ - uint32_t L2RADIS:1; /*!< \brief bit: 11 L2 Data Cache read-allocate mode disable */ - uint32_t L1RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */ - uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */ - uint32_t DDVM:1; /*!< \brief bit: 15 Disable Distributed Virtual Memory (DVM) transactions */ - RESERVED(3:12, uint32_t) - uint32_t DDI:1; /*!< \brief bit: 28 Disable dual issue */ - RESERVED(7:3, uint32_t) - } b; -#endif -#if __CORTEX_A == 9 || defined(DOXYGEN) - /** \brief Structure used for bit access on Cortex-A9 */ - struct - { - uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */ - RESERVED(0:1, uint32_t) - uint32_t L1PE:1; /*!< \brief bit: 2 Dside prefetch */ - uint32_t WFLZM:1; /*!< \brief bit: 3 Cache and TLB maintenance broadcast */ - RESERVED(1:2, uint32_t) - uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ - uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache control */ - uint32_t AOW:1; /*!< \brief bit: 8 Enable allocation in one cache way only */ - uint32_t PARITY:1; /*!< \brief bit: 9 Support for parity checking, if implemented */ - RESERVED(7:22, uint32_t) - } b; -#endif - uint32_t w; /*!< \brief Type used for word access */ -} ACTLR_Type; - -#define ACTLR_DDI_Pos 28U /*!< \brief ACTLR: DDI Position */ -#define ACTLR_DDI_Msk (1UL << ACTLR_DDI_Pos) /*!< \brief ACTLR: DDI Mask */ - -#define ACTLR_DBDI_Pos 28U /*!< \brief ACTLR: DBDI Position */ -#define ACTLR_DBDI_Msk (1UL << ACTLR_DBDI_Pos) /*!< \brief ACTLR: DBDI Mask */ - -#define ACTLR_BTDIS_Pos 18U /*!< \brief ACTLR: BTDIS Position */ -#define ACTLR_BTDIS_Msk (1UL << ACTLR_BTDIS_Pos) /*!< \brief ACTLR: BTDIS Mask */ - -#define ACTLR_RSDIS_Pos 17U /*!< \brief ACTLR: RSDIS Position */ -#define ACTLR_RSDIS_Msk (1UL << ACTLR_RSDIS_Pos) /*!< \brief ACTLR: RSDIS Mask */ - -#define ACTLR_BP_Pos 15U /*!< \brief ACTLR: BP Position */ -#define ACTLR_BP_Msk (3UL << ACTLR_BP_Pos) /*!< \brief ACTLR: BP Mask */ - -#define ACTLR_DDVM_Pos 15U /*!< \brief ACTLR: DDVM Position */ -#define ACTLR_DDVM_Msk (1UL << ACTLR_DDVM_Pos) /*!< \brief ACTLR: DDVM Mask */ - -#define ACTLR_L1PCTL_Pos 13U /*!< \brief ACTLR: L1PCTL Position */ -#define ACTLR_L1PCTL_Msk (3UL << ACTLR_L1PCTL_Pos) /*!< \brief ACTLR: L1PCTL Mask */ - -#define ACTLR_RADIS_Pos 12U /*!< \brief ACTLR: RADIS Position */ -#define ACTLR_RADIS_Msk (1UL << ACTLR_RADIS_Pos) /*!< \brief ACTLR: RADIS Mask */ - -#define 
ACTLR_L1RADIS_Pos 12U /*!< \brief ACTLR: L1RADIS Position */ -#define ACTLR_L1RADIS_Msk (1UL << ACTLR_L1RADIS_Pos) /*!< \brief ACTLR: L1RADIS Mask */ - -#define ACTLR_DWBST_Pos 11U /*!< \brief ACTLR: DWBST Position */ -#define ACTLR_DWBST_Msk (1UL << ACTLR_DWBST_Pos) /*!< \brief ACTLR: DWBST Mask */ - -#define ACTLR_L2RADIS_Pos 11U /*!< \brief ACTLR: L2RADIS Position */ -#define ACTLR_L2RADIS_Msk (1UL << ACTLR_L2RADIS_Pos) /*!< \brief ACTLR: L2RADIS Mask */ - -#define ACTLR_DODMBS_Pos 10U /*!< \brief ACTLR: DODMBS Position */ -#define ACTLR_DODMBS_Msk (1UL << ACTLR_DODMBS_Pos) /*!< \brief ACTLR: DODMBS Mask */ - -#define ACTLR_PARITY_Pos 9U /*!< \brief ACTLR: PARITY Position */ -#define ACTLR_PARITY_Msk (1UL << ACTLR_PARITY_Pos) /*!< \brief ACTLR: PARITY Mask */ - -#define ACTLR_AOW_Pos 8U /*!< \brief ACTLR: AOW Position */ -#define ACTLR_AOW_Msk (1UL << ACTLR_AOW_Pos) /*!< \brief ACTLR: AOW Mask */ - -#define ACTLR_EXCL_Pos 7U /*!< \brief ACTLR: EXCL Position */ -#define ACTLR_EXCL_Msk (1UL << ACTLR_EXCL_Pos) /*!< \brief ACTLR: EXCL Mask */ - -#define ACTLR_SMP_Pos 6U /*!< \brief ACTLR: SMP Position */ -#define ACTLR_SMP_Msk (1UL << ACTLR_SMP_Pos) /*!< \brief ACTLR: SMP Mask */ - -#define ACTLR_WFLZM_Pos 3U /*!< \brief ACTLR: WFLZM Position */ -#define ACTLR_WFLZM_Msk (1UL << ACTLR_WFLZM_Pos) /*!< \brief ACTLR: WFLZM Mask */ - -#define ACTLR_L1PE_Pos 2U /*!< \brief ACTLR: L1PE Position */ -#define ACTLR_L1PE_Msk (1UL << ACTLR_L1PE_Pos) /*!< \brief ACTLR: L1PE Mask */ - -#define ACTLR_FW_Pos 0U /*!< \brief ACTLR: FW Position */ -#define ACTLR_FW_Msk (1UL << ACTLR_FW_Pos) /*!< \brief ACTLR: FW Mask */ - -/* CP15 Register CPACR */ -typedef union -{ - struct - { - uint32_t CP0:2; /*!< \brief bit: 0..1 Access rights for coprocessor 0 */ - uint32_t CP1:2; /*!< \brief bit: 2..3 Access rights for coprocessor 1 */ - uint32_t CP2:2; /*!< \brief bit: 4..5 Access rights for coprocessor 2 */ - uint32_t CP3:2; /*!< \brief bit: 6..7 Access rights for coprocessor 3 */ - uint32_t CP4:2; /*!< \brief bit: 8..9 Access rights for coprocessor 4 */ - uint32_t CP5:2; /*!< \brief bit:10..11 Access rights for coprocessor 5 */ - uint32_t CP6:2; /*!< \brief bit:12..13 Access rights for coprocessor 6 */ - uint32_t CP7:2; /*!< \brief bit:14..15 Access rights for coprocessor 7 */ - uint32_t CP8:2; /*!< \brief bit:16..17 Access rights for coprocessor 8 */ - uint32_t CP9:2; /*!< \brief bit:18..19 Access rights for coprocessor 9 */ - uint32_t CP10:2; /*!< \brief bit:20..21 Access rights for coprocessor 10 */ - uint32_t CP11:2; /*!< \brief bit:22..23 Access rights for coprocessor 11 */ - uint32_t CP12:2; /*!< \brief bit:24..25 Access rights for coprocessor 11 */ - uint32_t CP13:2; /*!< \brief bit:26..27 Access rights for coprocessor 11 */ - uint32_t TRCDIS:1; /*!< \brief bit: 28 Disable CP14 access to trace registers */ - RESERVED(0:1, uint32_t) - uint32_t D32DIS:1; /*!< \brief bit: 30 Disable use of registers D16-D31 of the VFP register file */ - uint32_t ASEDIS:1; /*!< \brief bit: 31 Disable Advanced SIMD Functionality */ - } b; /*!< \brief Structure used for bit access */ - uint32_t w; /*!< \brief Type used for word access */ -} CPACR_Type; - -#define CPACR_ASEDIS_Pos 31U /*!< \brief CPACR: ASEDIS Position */ -#define CPACR_ASEDIS_Msk (1UL << CPACR_ASEDIS_Pos) /*!< \brief CPACR: ASEDIS Mask */ - -#define CPACR_D32DIS_Pos 30U /*!< \brief CPACR: D32DIS Position */ -#define CPACR_D32DIS_Msk (1UL << CPACR_D32DIS_Pos) /*!< \brief CPACR: D32DIS Mask */ - -#define CPACR_TRCDIS_Pos 28U /*!< \brief CPACR: D32DIS Position */ 
-#define CPACR_TRCDIS_Msk (1UL << CPACR_TRCDIS_Pos) /*!< \brief CPACR: TRCDIS Mask */
-
-#define CPACR_CP_Pos_(n) (n*2U) /*!< \brief CPACR: CPn Position */
-#define CPACR_CP_Msk_(n) (3UL << CPACR_CP_Pos_(n)) /*!< \brief CPACR: CPn Mask */
-
-#define CPACR_CP_NA 0U /*!< \brief CPACR CPn field: Access denied. */
-#define CPACR_CP_PL1 1U /*!< \brief CPACR CPn field: Accessible from PL1 only. */
-#define CPACR_CP_FA 3U /*!< \brief CPACR CPn field: Full access. */
-
-/* CP15 Register DFSR */
-typedef union
-{
- struct
- {
- uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits bit 0-3 */
- uint32_t Domain:4; /*!< \brief bit: 4.. 7 Fault on which domain */
- RESERVED(0:1, uint32_t)
- uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
- uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bits bit 4 */
- uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */
- uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
- uint32_t CM:1; /*!< \brief bit: 13 Cache maintenance fault */
- RESERVED(1:18, uint32_t)
- } s; /*!< \brief Structure used for bit access in short format */
- struct
- {
- uint32_t STATUS:6; /*!< \brief bit: 0.. 5 Fault Status bits */
- RESERVED(0:3, uint32_t)
- uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
- RESERVED(1:1, uint32_t)
- uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */
- uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
- uint32_t CM:1; /*!< \brief bit: 13 Cache maintenance fault */
- RESERVED(2:18, uint32_t)
- } l; /*!< \brief Structure used for bit access in long format */
- uint32_t w; /*!< \brief Type used for word access */
-} DFSR_Type;
-
-#define DFSR_CM_Pos 13U /*!< \brief DFSR: CM Position */
-#define DFSR_CM_Msk (1UL << DFSR_CM_Pos) /*!< \brief DFSR: CM Mask */
-
-#define DFSR_Ext_Pos 12U /*!< \brief DFSR: Ext Position */
-#define DFSR_Ext_Msk (1UL << DFSR_Ext_Pos) /*!< \brief DFSR: Ext Mask */
-
-#define DFSR_WnR_Pos 11U /*!< \brief DFSR: WnR Position */
-#define DFSR_WnR_Msk (1UL << DFSR_WnR_Pos) /*!< \brief DFSR: WnR Mask */
-
-#define DFSR_FS1_Pos 10U /*!< \brief DFSR: FS1 Position */
-#define DFSR_FS1_Msk (1UL << DFSR_FS1_Pos) /*!< \brief DFSR: FS1 Mask */
-
-#define DFSR_LPAE_Pos 9U /*!< \brief DFSR: LPAE Position */
-#define DFSR_LPAE_Msk (1UL << DFSR_LPAE_Pos) /*!< \brief DFSR: LPAE Mask */
-
-#define DFSR_Domain_Pos 4U /*!< \brief DFSR: Domain Position */
-#define DFSR_Domain_Msk (0xFUL << DFSR_Domain_Pos) /*!< \brief DFSR: Domain Mask */
-
-#define DFSR_FS0_Pos 0U /*!< \brief DFSR: FS0 Position */
-#define DFSR_FS0_Msk (0xFUL << DFSR_FS0_Pos) /*!< \brief DFSR: FS0 Mask */
-
-#define DFSR_STATUS_Pos 0U /*!< \brief DFSR: STATUS Position */
-#define DFSR_STATUS_Msk (0x3FUL << DFSR_STATUS_Pos) /*!< \brief DFSR: STATUS Mask */
-
-/* CP15 Register IFSR */
-typedef union
-{
- struct
- {
- uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits bit 0-3 */
- RESERVED(0:5, uint32_t)
- uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
- uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bits bit 4 */
- RESERVED(1:1, uint32_t)
- uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
- RESERVED(2:19, uint32_t)
- } s; /*!< \brief Structure used for bit access in short format */
- struct
- {
- uint32_t STATUS:6; /*!< \brief bit: 0..
5 Fault Status bits */ - RESERVED(0:3, uint32_t) - uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */ - RESERVED(1:2, uint32_t) - uint32_t ExT:1; /*!< \brief bit: 12 External abort type */ - RESERVED(2:19, uint32_t) - } l; /*!< \brief Structure used for bit access in long format */ - uint32_t w; /*!< \brief Type used for word access */ -} IFSR_Type; - -#define IFSR_ExT_Pos 12U /*!< \brief IFSR: ExT Position */ -#define IFSR_ExT_Msk (1UL << IFSR_ExT_Pos) /*!< \brief IFSR: ExT Mask */ - -#define IFSR_FS1_Pos 10U /*!< \brief IFSR: FS1 Position */ -#define IFSR_FS1_Msk (1UL << IFSR_FS1_Pos) /*!< \brief IFSR: FS1 Mask */ - -#define IFSR_LPAE_Pos 9U /*!< \brief IFSR: LPAE Position */ -#define IFSR_LPAE_Msk (0x1UL << IFSR_LPAE_Pos) /*!< \brief IFSR: LPAE Mask */ - -#define IFSR_FS0_Pos 0U /*!< \brief IFSR: FS0 Position */ -#define IFSR_FS0_Msk (0xFUL << IFSR_FS0_Pos) /*!< \brief IFSR: FS0 Mask */ - -#define IFSR_STATUS_Pos 0U /*!< \brief IFSR: STATUS Position */ -#define IFSR_STATUS_Msk (0x3FUL << IFSR_STATUS_Pos) /*!< \brief IFSR: STATUS Mask */ - -/* CP15 Register ISR */ -typedef union -{ - struct - { - RESERVED(0:6, uint32_t) - uint32_t F:1; /*!< \brief bit: 6 FIQ pending bit */ - uint32_t I:1; /*!< \brief bit: 7 IRQ pending bit */ - uint32_t A:1; /*!< \brief bit: 8 External abort pending bit */ - RESERVED(1:23, uint32_t) - } b; /*!< \brief Structure used for bit access */ - uint32_t w; /*!< \brief Type used for word access */ -} ISR_Type; - -#define ISR_A_Pos 13U /*!< \brief ISR: A Position */ -#define ISR_A_Msk (1UL << ISR_A_Pos) /*!< \brief ISR: A Mask */ - -#define ISR_I_Pos 12U /*!< \brief ISR: I Position */ -#define ISR_I_Msk (1UL << ISR_I_Pos) /*!< \brief ISR: I Mask */ - -#define ISR_F_Pos 11U /*!< \brief ISR: F Position */ -#define ISR_F_Msk (1UL << ISR_F_Pos) /*!< \brief ISR: F Mask */ - -/* DACR Register */ -#define DACR_D_Pos_(n) (2U*n) /*!< \brief DACR: Dn Position */ -#define DACR_D_Msk_(n) (3UL << DACR_D_Pos_(n)) /*!< \brief DACR: Dn Mask */ -#define DACR_Dn_NOACCESS 0U /*!< \brief DACR Dn field: No access */ -#define DACR_Dn_CLIENT 1U /*!< \brief DACR Dn field: Client */ -#define DACR_Dn_MANAGER 3U /*!< \brief DACR Dn field: Manager */ - -/** - \brief Mask and shift a bit field value for use in a register bit range. - \param [in] field Name of the register bit field. - \param [in] value Value of the bit field. This parameter is interpreted as an uint32_t type. - \return Masked and shifted value. -*/ -#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) - -/** - \brief Mask and shift a register value to extract a bit filed value. - \param [in] field Name of the register bit field. - \param [in] value Value of register. This parameter is interpreted as an uint32_t type. - \return Masked and shifted bit field value. -*/ -#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) - - -/** - \brief Union type to access the L2C_310 Cache Controller. 
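
A minimal usage sketch of the field helpers defined above (the numeric values are arbitrary examples, not defaults, and the function name is illustrative only):

/* Illustrative sketch only: combining the DACR field macros with the
   generic _VAL2FLD/_FLD2VAL helpers defined above. */
__STATIC_INLINE void example_field_macros(void)
{
  /* Compose a DACR value: domain 0 as client, domain 1 as manager. */
  uint32_t dacr = (DACR_Dn_CLIENT  << DACR_D_Pos_(0)) |
                  (DACR_Dn_MANAGER << DACR_D_Pos_(1));

  /* Pack and unpack a register bit field. */
  uint32_t dfsr   = _VAL2FLD(DFSR_Domain, 5U);   /* yields 0x50U  */
  uint32_t domain = _FLD2VAL(DFSR_Domain, dfsr); /* recovers 5U   */

  (void)dacr; (void)domain;                      /* example values only */
}
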
-*/ -#if (__L2C_PRESENT == 1U) || defined(DOXYGEN) -typedef struct -{ - __IM uint32_t CACHE_ID; /*!< \brief Offset: 0x0000 (R/ ) Cache ID Register */ - __IM uint32_t CACHE_TYPE; /*!< \brief Offset: 0x0004 (R/ ) Cache Type Register */ - RESERVED(0[0x3e], uint32_t) - __IOM uint32_t CONTROL; /*!< \brief Offset: 0x0100 (R/W) Control Register */ - __IOM uint32_t AUX_CNT; /*!< \brief Offset: 0x0104 (R/W) Auxiliary Control */ - RESERVED(1[0x3e], uint32_t) - __IOM uint32_t EVENT_CONTROL; /*!< \brief Offset: 0x0200 (R/W) Event Counter Control */ - __IOM uint32_t EVENT_COUNTER1_CONF; /*!< \brief Offset: 0x0204 (R/W) Event Counter 1 Configuration */ - __IOM uint32_t EVENT_COUNTER0_CONF; /*!< \brief Offset: 0x0208 (R/W) Event Counter 1 Configuration */ - RESERVED(2[0x2], uint32_t) - __IOM uint32_t INTERRUPT_MASK; /*!< \brief Offset: 0x0214 (R/W) Interrupt Mask */ - __IM uint32_t MASKED_INT_STATUS; /*!< \brief Offset: 0x0218 (R/ ) Masked Interrupt Status */ - __IM uint32_t RAW_INT_STATUS; /*!< \brief Offset: 0x021c (R/ ) Raw Interrupt Status */ - __OM uint32_t INTERRUPT_CLEAR; /*!< \brief Offset: 0x0220 ( /W) Interrupt Clear */ - RESERVED(3[0x143], uint32_t) - __IOM uint32_t CACHE_SYNC; /*!< \brief Offset: 0x0730 (R/W) Cache Sync */ - RESERVED(4[0xf], uint32_t) - __IOM uint32_t INV_LINE_PA; /*!< \brief Offset: 0x0770 (R/W) Invalidate Line By PA */ - RESERVED(6[2], uint32_t) - __IOM uint32_t INV_WAY; /*!< \brief Offset: 0x077c (R/W) Invalidate by Way */ - RESERVED(5[0xc], uint32_t) - __IOM uint32_t CLEAN_LINE_PA; /*!< \brief Offset: 0x07b0 (R/W) Clean Line by PA */ - RESERVED(7[1], uint32_t) - __IOM uint32_t CLEAN_LINE_INDEX_WAY; /*!< \brief Offset: 0x07b8 (R/W) Clean Line by Index/Way */ - __IOM uint32_t CLEAN_WAY; /*!< \brief Offset: 0x07bc (R/W) Clean by Way */ - RESERVED(8[0xc], uint32_t) - __IOM uint32_t CLEAN_INV_LINE_PA; /*!< \brief Offset: 0x07f0 (R/W) Clean and Invalidate Line by PA */ - RESERVED(9[1], uint32_t) - __IOM uint32_t CLEAN_INV_LINE_INDEX_WAY; /*!< \brief Offset: 0x07f8 (R/W) Clean and Invalidate Line by Index/Way */ - __IOM uint32_t CLEAN_INV_WAY; /*!< \brief Offset: 0x07fc (R/W) Clean and Invalidate by Way */ - RESERVED(10[0x40], uint32_t) - __IOM uint32_t DATA_LOCK_0_WAY; /*!< \brief Offset: 0x0900 (R/W) Data Lockdown 0 by Way */ - __IOM uint32_t INST_LOCK_0_WAY; /*!< \brief Offset: 0x0904 (R/W) Instruction Lockdown 0 by Way */ - __IOM uint32_t DATA_LOCK_1_WAY; /*!< \brief Offset: 0x0908 (R/W) Data Lockdown 1 by Way */ - __IOM uint32_t INST_LOCK_1_WAY; /*!< \brief Offset: 0x090c (R/W) Instruction Lockdown 1 by Way */ - __IOM uint32_t DATA_LOCK_2_WAY; /*!< \brief Offset: 0x0910 (R/W) Data Lockdown 2 by Way */ - __IOM uint32_t INST_LOCK_2_WAY; /*!< \brief Offset: 0x0914 (R/W) Instruction Lockdown 2 by Way */ - __IOM uint32_t DATA_LOCK_3_WAY; /*!< \brief Offset: 0x0918 (R/W) Data Lockdown 3 by Way */ - __IOM uint32_t INST_LOCK_3_WAY; /*!< \brief Offset: 0x091c (R/W) Instruction Lockdown 3 by Way */ - __IOM uint32_t DATA_LOCK_4_WAY; /*!< \brief Offset: 0x0920 (R/W) Data Lockdown 4 by Way */ - __IOM uint32_t INST_LOCK_4_WAY; /*!< \brief Offset: 0x0924 (R/W) Instruction Lockdown 4 by Way */ - __IOM uint32_t DATA_LOCK_5_WAY; /*!< \brief Offset: 0x0928 (R/W) Data Lockdown 5 by Way */ - __IOM uint32_t INST_LOCK_5_WAY; /*!< \brief Offset: 0x092c (R/W) Instruction Lockdown 5 by Way */ - __IOM uint32_t DATA_LOCK_6_WAY; /*!< \brief Offset: 0x0930 (R/W) Data Lockdown 5 by Way */ - __IOM uint32_t INST_LOCK_6_WAY; /*!< \brief Offset: 0x0934 (R/W) Instruction Lockdown 5 by Way */ - __IOM uint32_t 
DATA_LOCK_7_WAY; /*!< \brief Offset: 0x0938 (R/W) Data Lockdown 6 by Way */ - __IOM uint32_t INST_LOCK_7_WAY; /*!< \brief Offset: 0x093c (R/W) Instruction Lockdown 6 by Way */ - RESERVED(11[0x4], uint32_t) - __IOM uint32_t LOCK_LINE_EN; /*!< \brief Offset: 0x0950 (R/W) Lockdown by Line Enable */ - __IOM uint32_t UNLOCK_ALL_BY_WAY; /*!< \brief Offset: 0x0954 (R/W) Unlock All Lines by Way */ - RESERVED(12[0xaa], uint32_t) - __IOM uint32_t ADDRESS_FILTER_START; /*!< \brief Offset: 0x0c00 (R/W) Address Filtering Start */ - __IOM uint32_t ADDRESS_FILTER_END; /*!< \brief Offset: 0x0c04 (R/W) Address Filtering End */ - RESERVED(13[0xce], uint32_t) - __IOM uint32_t DEBUG_CONTROL; /*!< \brief Offset: 0x0f40 (R/W) Debug Control Register */ -} L2C_310_TypeDef; - -#define L2C_310 ((L2C_310_TypeDef *)L2C_310_BASE) /*!< \brief L2C_310 register set access pointer */ -#endif - -#if (__GIC_PRESENT == 1U) || defined(DOXYGEN) - -/** \brief Structure type to access the Generic Interrupt Controller Distributor (GICD) -*/ -typedef struct -{ - __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) Distributor Control Register */ - __IM uint32_t TYPER; /*!< \brief Offset: 0x004 (R/ ) Interrupt Controller Type Register */ - __IM uint32_t IIDR; /*!< \brief Offset: 0x008 (R/ ) Distributor Implementer Identification Register */ - RESERVED(0, uint32_t) - __IOM uint32_t STATUSR; /*!< \brief Offset: 0x010 (R/W) Error Reporting Status Register, optional */ - RESERVED(1[11], uint32_t) - __OM uint32_t SETSPI_NSR; /*!< \brief Offset: 0x040 ( /W) Set SPI Register */ - RESERVED(2, uint32_t) - __OM uint32_t CLRSPI_NSR; /*!< \brief Offset: 0x048 ( /W) Clear SPI Register */ - RESERVED(3, uint32_t) - __OM uint32_t SETSPI_SR; /*!< \brief Offset: 0x050 ( /W) Set SPI, Secure Register */ - RESERVED(4, uint32_t) - __OM uint32_t CLRSPI_SR; /*!< \brief Offset: 0x058 ( /W) Clear SPI, Secure Register */ - RESERVED(5[9], uint32_t) - __IOM uint32_t IGROUPR[32]; /*!< \brief Offset: 0x080 (R/W) Interrupt Group Registers */ - __IOM uint32_t ISENABLER[32]; /*!< \brief Offset: 0x100 (R/W) Interrupt Set-Enable Registers */ - __IOM uint32_t ICENABLER[32]; /*!< \brief Offset: 0x180 (R/W) Interrupt Clear-Enable Registers */ - __IOM uint32_t ISPENDR[32]; /*!< \brief Offset: 0x200 (R/W) Interrupt Set-Pending Registers */ - __IOM uint32_t ICPENDR[32]; /*!< \brief Offset: 0x280 (R/W) Interrupt Clear-Pending Registers */ - __IOM uint32_t ISACTIVER[32]; /*!< \brief Offset: 0x300 (R/W) Interrupt Set-Active Registers */ - __IOM uint32_t ICACTIVER[32]; /*!< \brief Offset: 0x380 (R/W) Interrupt Clear-Active Registers */ - __IOM uint32_t IPRIORITYR[255]; /*!< \brief Offset: 0x400 (R/W) Interrupt Priority Registers */ - RESERVED(6, uint32_t) - __IOM uint32_t ITARGETSR[255]; /*!< \brief Offset: 0x800 (R/W) Interrupt Targets Registers */ - RESERVED(7, uint32_t) - __IOM uint32_t ICFGR[64]; /*!< \brief Offset: 0xC00 (R/W) Interrupt Configuration Registers */ - __IOM uint32_t IGRPMODR[32]; /*!< \brief Offset: 0xD00 (R/W) Interrupt Group Modifier Registers */ - RESERVED(8[32], uint32_t) - __IOM uint32_t NSACR[64]; /*!< \brief Offset: 0xE00 (R/W) Non-secure Access Control Registers */ - __OM uint32_t SGIR; /*!< \brief Offset: 0xF00 ( /W) Software Generated Interrupt Register */ - RESERVED(9[3], uint32_t) - __IOM uint32_t CPENDSGIR[4]; /*!< \brief Offset: 0xF10 (R/W) SGI Clear-Pending Registers */ - __IOM uint32_t SPENDSGIR[4]; /*!< \brief Offset: 0xF20 (R/W) SGI Set-Pending Registers */ - RESERVED(10[5236], uint32_t) - __IOM uint64_t IROUTER[988]; /*!< \brief Offset: 
0x6100 (R/W) Interrupt Routing Registers */
-} GICDistributor_Type;
-
-#define GICDistributor ((GICDistributor_Type *) GIC_DISTRIBUTOR_BASE ) /*!< \brief GIC Distributor register set access pointer */
-
-/* GICDistributor CTLR Register */
-#define GICDistributor_CTLR_EnableGrp0_Pos 0U /*!< GICDistributor CTLR: EnableGrp0 Position */
-#define GICDistributor_CTLR_EnableGrp0_Msk (0x1U /*<< GICDistributor_CTLR_EnableGrp0_Pos*/) /*!< GICDistributor CTLR: EnableGrp0 Mask */
-#define GICDistributor_CTLR_EnableGrp0(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CTLR_EnableGrp0_Pos*/)) & GICDistributor_CTLR_EnableGrp0_Msk)
-
-#define GICDistributor_CTLR_EnableGrp1_Pos 1U /*!< GICDistributor CTLR: EnableGrp1 Position */
-#define GICDistributor_CTLR_EnableGrp1_Msk (0x1U << GICDistributor_CTLR_EnableGrp1_Pos) /*!< GICDistributor CTLR: EnableGrp1 Mask */
-#define GICDistributor_CTLR_EnableGrp1(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_EnableGrp1_Pos)) & GICDistributor_CTLR_EnableGrp1_Msk)
-
-#define GICDistributor_CTLR_ARE_Pos 4U /*!< GICDistributor CTLR: ARE Position */
-#define GICDistributor_CTLR_ARE_Msk (0x1U << GICDistributor_CTLR_ARE_Pos) /*!< GICDistributor CTLR: ARE Mask */
-#define GICDistributor_CTLR_ARE(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_ARE_Pos)) & GICDistributor_CTLR_ARE_Msk)
-
-#define GICDistributor_CTLR_DC_Pos 6U /*!< GICDistributor CTLR: DC Position */
-#define GICDistributor_CTLR_DC_Msk (0x1U << GICDistributor_CTLR_DC_Pos) /*!< GICDistributor CTLR: DC Mask */
-#define GICDistributor_CTLR_DC(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_DC_Pos)) & GICDistributor_CTLR_DC_Msk)
-
-#define GICDistributor_CTLR_EINWF_Pos 7U /*!< GICDistributor CTLR: EINWF Position */
-#define GICDistributor_CTLR_EINWF_Msk (0x1U << GICDistributor_CTLR_EINWF_Pos) /*!< GICDistributor CTLR: EINWF Mask */
-#define GICDistributor_CTLR_EINWF(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_EINWF_Pos)) & GICDistributor_CTLR_EINWF_Msk)
-
-#define GICDistributor_CTLR_RWP_Pos 31U /*!< GICDistributor CTLR: RWP Position */
-#define GICDistributor_CTLR_RWP_Msk (0x1U << GICDistributor_CTLR_RWP_Pos) /*!< GICDistributor CTLR: RWP Mask */
-#define GICDistributor_CTLR_RWP(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_RWP_Pos)) & GICDistributor_CTLR_RWP_Msk)
-
-/* GICDistributor TYPER Register */
-#define GICDistributor_TYPER_ITLinesNumber_Pos 0U /*!< GICDistributor TYPER: ITLinesNumber Position */
-#define GICDistributor_TYPER_ITLinesNumber_Msk (0x1FU /*<< GICDistributor_TYPER_ITLinesNumber_Pos*/) /*!< GICDistributor TYPER: ITLinesNumber Mask */
-#define GICDistributor_TYPER_ITLinesNumber(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_TYPER_ITLinesNumber_Pos*/)) & GICDistributor_TYPER_ITLinesNumber_Msk)
-
-#define GICDistributor_TYPER_CPUNumber_Pos 5U /*!< GICDistributor TYPER: CPUNumber Position */
-#define GICDistributor_TYPER_CPUNumber_Msk (0x7U << GICDistributor_TYPER_CPUNumber_Pos) /*!< GICDistributor TYPER: CPUNumber Mask */
-#define GICDistributor_TYPER_CPUNumber(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_CPUNumber_Pos)) & GICDistributor_TYPER_CPUNumber_Msk)
-
-#define GICDistributor_TYPER_SecurityExtn_Pos 10U /*!< GICDistributor TYPER: SecurityExtn Position */
-#define GICDistributor_TYPER_SecurityExtn_Msk (0x1U << GICDistributor_TYPER_SecurityExtn_Pos) /*!< GICDistributor TYPER: SecurityExtn Mask */
-#define GICDistributor_TYPER_SecurityExtn(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_SecurityExtn_Pos)) &
GICDistributor_TYPER_SecurityExtn_Msk) - -#define GICDistributor_TYPER_LSPI_Pos 11U /*!< GICDistributor TYPER: LSPI Position */ -#define GICDistributor_TYPER_LSPI_Msk (0x1FU << GICDistributor_TYPER_LSPI_Pos) /*!< GICDistributor TYPER: LSPI Mask */ -#define GICDistributor_TYPER_LSPI(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_LSPI_Pos)) & GICDistributor_TYPER_LSPI_Msk) - -/* GICDistributor IIDR Register */ -#define GICDistributor_IIDR_Implementer_Pos 0U /*!< GICDistributor IIDR: Implementer Position */ -#define GICDistributor_IIDR_Implementer_Msk (0xFFFU /*<< GICDistributor_IIDR_Implementer_Pos*/) /*!< GICDistributor IIDR: Implementer Mask */ -#define GICDistributor_IIDR_Implementer(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_IIDR_Implementer_Pos*/)) & GICDistributor_IIDR_Implementer_Msk) - -#define GICDistributor_IIDR_Revision_Pos 12U /*!< GICDistributor IIDR: Revision Position */ -#define GICDistributor_IIDR_Revision_Msk (0xFU << GICDistributor_IIDR_Revision_Pos) /*!< GICDistributor IIDR: Revision Mask */ -#define GICDistributor_IIDR_Revision(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_Revision_Pos)) & GICDistributor_IIDR_Revision_Msk) - -#define GICDistributor_IIDR_Variant_Pos 16U /*!< GICDistributor IIDR: Variant Position */ -#define GICDistributor_IIDR_Variant_Msk (0xFU << GICDistributor_IIDR_Variant_Pos) /*!< GICDistributor IIDR: Variant Mask */ -#define GICDistributor_IIDR_Variant(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_Variant_Pos)) & GICDistributor_IIDR_Variant_Msk) - -#define GICDistributor_IIDR_ProductID_Pos 24U /*!< GICDistributor IIDR: ProductID Position */ -#define GICDistributor_IIDR_ProductID_Msk (0xFFU << GICDistributor_IIDR_ProductID_Pos) /*!< GICDistributor IIDR: ProductID Mask */ -#define GICDistributor_IIDR_ProductID(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_ProductID_Pos)) & GICDistributor_IIDR_ProductID_Msk) - -/* GICDistributor STATUSR Register */ -#define GICDistributor_STATUSR_RRD_Pos 0U /*!< GICDistributor STATUSR: RRD Position */ -#define GICDistributor_STATUSR_RRD_Msk (0x1U /*<< GICDistributor_STATUSR_RRD_Pos*/) /*!< GICDistributor STATUSR: RRD Mask */ -#define GICDistributor_STATUSR_RRD(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_STATUSR_RRD_Pos*/)) & GICDistributor_STATUSR_RRD_Msk) - -#define GICDistributor_STATUSR_WRD_Pos 1U /*!< GICDistributor STATUSR: WRD Position */ -#define GICDistributor_STATUSR_WRD_Msk (0x1U << GICDistributor_STATUSR_WRD_Pos) /*!< GICDistributor STATUSR: WRD Mask */ -#define GICDistributor_STATUSR_WRD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_WRD_Pos)) & GICDistributor_STATUSR_WRD_Msk) - -#define GICDistributor_STATUSR_RWOD_Pos 2U /*!< GICDistributor STATUSR: RWOD Position */ -#define GICDistributor_STATUSR_RWOD_Msk (0x1U << GICDistributor_STATUSR_RWOD_Pos) /*!< GICDistributor STATUSR: RWOD Mask */ -#define GICDistributor_STATUSR_RWOD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_RWOD_Pos)) & GICDistributor_STATUSR_RWOD_Msk) - -#define GICDistributor_STATUSR_WROD_Pos 3U /*!< GICDistributor STATUSR: WROD Position */ -#define GICDistributor_STATUSR_WROD_Msk (0x1U << GICDistributor_STATUSR_WROD_Pos) /*!< GICDistributor STATUSR: WROD Mask */ -#define GICDistributor_STATUSR_WROD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_WROD_Pos)) & GICDistributor_STATUSR_WROD_Msk) - -/* GICDistributor SETSPI_NSR Register */ -#define GICDistributor_SETSPI_NSR_INTID_Pos 0U /*!< GICDistributor SETSPI_NSR: INTID Position */ -#define 
GICDistributor_SETSPI_NSR_INTID_Msk (0x3FFU /*<< GICDistributor_SETSPI_NSR_INTID_Pos*/) /*!< GICDistributor SETSPI_NSR: INTID Mask */ -#define GICDistributor_SETSPI_NSR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SETSPI_NSR_INTID_Pos*/)) & GICDistributor_SETSPI_NSR_INTID_Msk) - -/* GICDistributor CLRSPI_NSR Register */ -#define GICDistributor_CLRSPI_NSR_INTID_Pos 0U /*!< GICDistributor CLRSPI_NSR: INTID Position */ -#define GICDistributor_CLRSPI_NSR_INTID_Msk (0x3FFU /*<< GICDistributor_CLRSPI_NSR_INTID_Pos*/) /*!< GICDistributor CLRSPI_NSR: INTID Mask */ -#define GICDistributor_CLRSPI_NSR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CLRSPI_NSR_INTID_Pos*/)) & GICDistributor_CLRSPI_NSR_INTID_Msk) - -/* GICDistributor SETSPI_SR Register */ -#define GICDistributor_SETSPI_SR_INTID_Pos 0U /*!< GICDistributor SETSPI_SR: INTID Position */ -#define GICDistributor_SETSPI_SR_INTID_Msk (0x3FFU /*<< GICDistributor_SETSPI_SR_INTID_Pos*/) /*!< GICDistributor SETSPI_SR: INTID Mask */ -#define GICDistributor_SETSPI_SR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SETSPI_SR_INTID_Pos*/)) & GICDistributor_SETSPI_SR_INTID_Msk) - -/* GICDistributor CLRSPI_SR Register */ -#define GICDistributor_CLRSPI_SR_INTID_Pos 0U /*!< GICDistributor CLRSPI_SR: INTID Position */ -#define GICDistributor_CLRSPI_SR_INTID_Msk (0x3FFU /*<< GICDistributor_CLRSPI_SR_INTID_Pos*/) /*!< GICDistributor CLRSPI_SR: INTID Mask */ -#define GICDistributor_CLRSPI_SR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CLRSPI_SR_INTID_Pos*/)) & GICDistributor_CLRSPI_SR_INTID_Msk) - -/* GICDistributor ITARGETSR Register */ -#define GICDistributor_ITARGETSR_CPU0_Pos 0U /*!< GICDistributor ITARGETSR: CPU0 Position */ -#define GICDistributor_ITARGETSR_CPU0_Msk (0x1U /*<< GICDistributor_ITARGETSR_CPU0_Pos*/) /*!< GICDistributor ITARGETSR: CPU0 Mask */ -#define GICDistributor_ITARGETSR_CPU0(x) (((uint8_t)(((uint8_t)(x)) /*<< GICDistributor_ITARGETSR_CPU0_Pos*/)) & GICDistributor_ITARGETSR_CPU0_Msk) - -#define GICDistributor_ITARGETSR_CPU1_Pos 1U /*!< GICDistributor ITARGETSR: CPU1 Position */ -#define GICDistributor_ITARGETSR_CPU1_Msk (0x1U << GICDistributor_ITARGETSR_CPU1_Pos) /*!< GICDistributor ITARGETSR: CPU1 Mask */ -#define GICDistributor_ITARGETSR_CPU1(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU1_Pos)) & GICDistributor_ITARGETSR_CPU1_Msk) - -#define GICDistributor_ITARGETSR_CPU2_Pos 2U /*!< GICDistributor ITARGETSR: CPU2 Position */ -#define GICDistributor_ITARGETSR_CPU2_Msk (0x1U << GICDistributor_ITARGETSR_CPU2_Pos) /*!< GICDistributor ITARGETSR: CPU2 Mask */ -#define GICDistributor_ITARGETSR_CPU2(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU2_Pos)) & GICDistributor_ITARGETSR_CPU2_Msk) - -#define GICDistributor_ITARGETSR_CPU3_Pos 3U /*!< GICDistributor ITARGETSR: CPU3 Position */ -#define GICDistributor_ITARGETSR_CPU3_Msk (0x1U << GICDistributor_ITARGETSR_CPU3_Pos) /*!< GICDistributor ITARGETSR: CPU3 Mask */ -#define GICDistributor_ITARGETSR_CPU3(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU3_Pos)) & GICDistributor_ITARGETSR_CPU3_Msk) - -#define GICDistributor_ITARGETSR_CPU4_Pos 4U /*!< GICDistributor ITARGETSR: CPU4 Position */ -#define GICDistributor_ITARGETSR_CPU4_Msk (0x1U << GICDistributor_ITARGETSR_CPU4_Pos) /*!< GICDistributor ITARGETSR: CPU4 Mask */ -#define GICDistributor_ITARGETSR_CPU4(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU4_Pos)) & GICDistributor_ITARGETSR_CPU4_Msk) - -#define GICDistributor_ITARGETSR_CPU5_Pos 
5U /*!< GICDistributor ITARGETSR: CPU5 Position */ -#define GICDistributor_ITARGETSR_CPU5_Msk (0x1U << GICDistributor_ITARGETSR_CPU5_Pos) /*!< GICDistributor ITARGETSR: CPU5 Mask */ -#define GICDistributor_ITARGETSR_CPU5(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU5_Pos)) & GICDistributor_ITARGETSR_CPU5_Msk) - -#define GICDistributor_ITARGETSR_CPU6_Pos 6U /*!< GICDistributor ITARGETSR: CPU6 Position */ -#define GICDistributor_ITARGETSR_CPU6_Msk (0x1U << GICDistributor_ITARGETSR_CPU6_Pos) /*!< GICDistributor ITARGETSR: CPU6 Mask */ -#define GICDistributor_ITARGETSR_CPU6(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU6_Pos)) & GICDistributor_ITARGETSR_CPU6_Msk) - -#define GICDistributor_ITARGETSR_CPU7_Pos 7U /*!< GICDistributor ITARGETSR: CPU7 Position */ -#define GICDistributor_ITARGETSR_CPU7_Msk (0x1U << GICDistributor_ITARGETSR_CPU7_Pos) /*!< GICDistributor ITARGETSR: CPU7 Mask */ -#define GICDistributor_ITARGETSR_CPU7(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU7_Pos)) & GICDistributor_ITARGETSR_CPU7_Msk) - -/* GICDistributor SGIR Register */ -#define GICDistributor_SGIR_INTID_Pos 0U /*!< GICDistributor SGIR: INTID Position */ -#define GICDistributor_SGIR_INTID_Msk (0x7U /*<< GICDistributor_SGIR_INTID_Pos*/) /*!< GICDistributor SGIR: INTID Mask */ -#define GICDistributor_SGIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SGIR_INTID_Pos*/)) & GICDistributor_SGIR_INTID_Msk) - -#define GICDistributor_SGIR_NSATT_Pos 15U /*!< GICDistributor SGIR: NSATT Position */ -#define GICDistributor_SGIR_NSATT_Msk (0x1U << GICDistributor_SGIR_NSATT_Pos) /*!< GICDistributor SGIR: NSATT Mask */ -#define GICDistributor_SGIR_NSATT(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_NSATT_Pos)) & GICDistributor_SGIR_NSATT_Msk) - -#define GICDistributor_SGIR_CPUTargetList_Pos 16U /*!< GICDistributor SGIR: CPUTargetList Position */ -#define GICDistributor_SGIR_CPUTargetList_Msk (0xFFU << GICDistributor_SGIR_CPUTargetList_Pos) /*!< GICDistributor SGIR: CPUTargetList Mask */ -#define GICDistributor_SGIR_CPUTargetList(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_CPUTargetList_Pos)) & GICDistributor_SGIR_CPUTargetList_Msk) - -#define GICDistributor_SGIR_TargetFilterList_Pos 24U /*!< GICDistributor SGIR: TargetFilterList Position */ -#define GICDistributor_SGIR_TargetFilterList_Msk (0x3U << GICDistributor_SGIR_TargetFilterList_Pos) /*!< GICDistributor SGIR: TargetFilterList Mask */ -#define GICDistributor_SGIR_TargetFilterList(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_TargetFilterList_Pos)) & GICDistributor_SGIR_TargetFilterList_Msk) - -/* GICDistributor IROUTER Register */ -#define GICDistributor_IROUTER_Aff0_Pos 0UL /*!< GICDistributor IROUTER: Aff0 Position */ -#define GICDistributor_IROUTER_Aff0_Msk (0xFFUL /*<< GICDistributor_IROUTER_Aff0_Pos*/) /*!< GICDistributor IROUTER: Aff0 Mask */ -#define GICDistributor_IROUTER_Aff0(x) (((uint64_t)(((uint64_t)(x)) /*<< GICDistributor_IROUTER_Aff0_Pos*/)) & GICDistributor_IROUTER_Aff0_Msk) - -#define GICDistributor_IROUTER_Aff1_Pos 8UL /*!< GICDistributor IROUTER: Aff1 Position */ -#define GICDistributor_IROUTER_Aff1_Msk (0xFFUL << GICDistributor_IROUTER_Aff1_Pos) /*!< GICDistributor IROUTER: Aff1 Mask */ -#define GICDistributor_IROUTER_Aff1(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff1_Pos)) & GICDistributor_IROUTER_Aff1_Msk) - -#define GICDistributor_IROUTER_Aff2_Pos 16UL /*!< GICDistributor IROUTER: Aff2 Position */ -#define GICDistributor_IROUTER_Aff2_Msk (0xFFUL << 
GICDistributor_IROUTER_Aff2_Pos) /*!< GICDistributor IROUTER: Aff2 Mask */ -#define GICDistributor_IROUTER_Aff2(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff2_Pos)) & GICDistributor_IROUTER_Aff2_Msk) - -#define GICDistributor_IROUTER_IRM_Pos 31UL /*!< GICDistributor IROUTER: IRM Position */ -#define GICDistributor_IROUTER_IRM_Msk (0xFFUL << GICDistributor_IROUTER_IRM_Pos) /*!< GICDistributor IROUTER: IRM Mask */ -#define GICDistributor_IROUTER_IRM(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_IRM_Pos)) & GICDistributor_IROUTER_IRM_Msk) - -#define GICDistributor_IROUTER_Aff3_Pos 32UL /*!< GICDistributor IROUTER: Aff3 Position */ -#define GICDistributor_IROUTER_Aff3_Msk (0xFFUL << GICDistributor_IROUTER_Aff3_Pos) /*!< GICDistributor IROUTER: Aff3 Mask */ -#define GICDistributor_IROUTER_Aff3(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff3_Pos)) & GICDistributor_IROUTER_Aff3_Msk) - - - -/** \brief Structure type to access the Generic Interrupt Controller Interface (GICC) -*/ -typedef struct -{ - __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) CPU Interface Control Register */ - __IOM uint32_t PMR; /*!< \brief Offset: 0x004 (R/W) Interrupt Priority Mask Register */ - __IOM uint32_t BPR; /*!< \brief Offset: 0x008 (R/W) Binary Point Register */ - __IM uint32_t IAR; /*!< \brief Offset: 0x00C (R/ ) Interrupt Acknowledge Register */ - __OM uint32_t EOIR; /*!< \brief Offset: 0x010 ( /W) End Of Interrupt Register */ - __IM uint32_t RPR; /*!< \brief Offset: 0x014 (R/ ) Running Priority Register */ - __IM uint32_t HPPIR; /*!< \brief Offset: 0x018 (R/ ) Highest Priority Pending Interrupt Register */ - __IOM uint32_t ABPR; /*!< \brief Offset: 0x01C (R/W) Aliased Binary Point Register */ - __IM uint32_t AIAR; /*!< \brief Offset: 0x020 (R/ ) Aliased Interrupt Acknowledge Register */ - __OM uint32_t AEOIR; /*!< \brief Offset: 0x024 ( /W) Aliased End Of Interrupt Register */ - __IM uint32_t AHPPIR; /*!< \brief Offset: 0x028 (R/ ) Aliased Highest Priority Pending Interrupt Register */ - __IOM uint32_t STATUSR; /*!< \brief Offset: 0x02C (R/W) Error Reporting Status Register, optional */ - RESERVED(1[40], uint32_t) - __IOM uint32_t APR[4]; /*!< \brief Offset: 0x0D0 (R/W) Active Priority Register */ - __IOM uint32_t NSAPR[4]; /*!< \brief Offset: 0x0E0 (R/W) Non-secure Active Priority Register */ - RESERVED(2[3], uint32_t) - __IM uint32_t IIDR; /*!< \brief Offset: 0x0FC (R/ ) CPU Interface Identification Register */ - RESERVED(3[960], uint32_t) - __OM uint32_t DIR; /*!< \brief Offset: 0x1000( /W) Deactivate Interrupt Register */ -} GICInterface_Type; - -#define GICInterface ((GICInterface_Type *) GIC_INTERFACE_BASE ) /*!< \brief GIC Interface register set access pointer */ - -/* GICInterface CTLR Register */ -#define GICInterface_CTLR_Enable_Pos 0U /*!< PTIM CTLR: Enable Position */ -#define GICInterface_CTLR_Enable_Msk (0x1U /*<< GICInterface_CTLR_Enable_Pos*/) /*!< PTIM CTLR: Enable Mask */ -#define GICInterface_CTLR_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_CTLR_Enable_Pos*/)) & GICInterface_CTLR_Enable_Msk) - -/* GICInterface PMR Register */ -#define GICInterface_PMR_Priority_Pos 0U /*!< PTIM PMR: Priority Position */ -#define GICInterface_PMR_Priority_Msk (0xFFU /*<< GICInterface_PMR_Priority_Pos*/) /*!< PTIM PMR: Priority Mask */ -#define GICInterface_PMR_Priority(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_PMR_Priority_Pos*/)) & GICInterface_PMR_Priority_Msk) - -/* GICInterface BPR Register */ -#define GICInterface_BPR_Binary_Point_Pos 0U 
/*!< PTIM BPR: Binary_Point Position */ -#define GICInterface_BPR_Binary_Point_Msk (0x7U /*<< GICInterface_BPR_Binary_Point_Pos*/) /*!< PTIM BPR: Binary_Point Mask */ -#define GICInterface_BPR_Binary_Point(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_BPR_Binary_Point_Pos*/)) & GICInterface_BPR_Binary_Point_Msk) - -/* GICInterface IAR Register */ -#define GICInterface_IAR_INTID_Pos 0U /*!< PTIM IAR: INTID Position */ -#define GICInterface_IAR_INTID_Msk (0xFFFFFFU /*<< GICInterface_IAR_INTID_Pos*/) /*!< PTIM IAR: INTID Mask */ -#define GICInterface_IAR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_IAR_INTID_Pos*/)) & GICInterface_IAR_INTID_Msk) - -/* GICInterface EOIR Register */ -#define GICInterface_EOIR_INTID_Pos 0U /*!< PTIM EOIR: INTID Position */ -#define GICInterface_EOIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_EOIR_INTID_Pos*/) /*!< PTIM EOIR: INTID Mask */ -#define GICInterface_EOIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_EOIR_INTID_Pos*/)) & GICInterface_EOIR_INTID_Msk) - -/* GICInterface RPR Register */ -#define GICInterface_RPR_INTID_Pos 0U /*!< PTIM RPR: INTID Position */ -#define GICInterface_RPR_INTID_Msk (0xFFU /*<< GICInterface_RPR_INTID_Pos*/) /*!< PTIM RPR: INTID Mask */ -#define GICInterface_RPR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_RPR_INTID_Pos*/)) & GICInterface_RPR_INTID_Msk) - -/* GICInterface HPPIR Register */ -#define GICInterface_HPPIR_INTID_Pos 0U /*!< PTIM HPPIR: INTID Position */ -#define GICInterface_HPPIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_HPPIR_INTID_Pos*/) /*!< PTIM HPPIR: INTID Mask */ -#define GICInterface_HPPIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_HPPIR_INTID_Pos*/)) & GICInterface_HPPIR_INTID_Msk) - -/* GICInterface ABPR Register */ -#define GICInterface_ABPR_Binary_Point_Pos 0U /*!< PTIM ABPR: Binary_Point Position */ -#define GICInterface_ABPR_Binary_Point_Msk (0x7U /*<< GICInterface_ABPR_Binary_Point_Pos*/) /*!< PTIM ABPR: Binary_Point Mask */ -#define GICInterface_ABPR_Binary_Point(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_ABPR_Binary_Point_Pos*/)) & GICInterface_ABPR_Binary_Point_Msk) - -/* GICInterface AIAR Register */ -#define GICInterface_AIAR_INTID_Pos 0U /*!< PTIM AIAR: INTID Position */ -#define GICInterface_AIAR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AIAR_INTID_Pos*/) /*!< PTIM AIAR: INTID Mask */ -#define GICInterface_AIAR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AIAR_INTID_Pos*/)) & GICInterface_AIAR_INTID_Msk) - -/* GICInterface AEOIR Register */ -#define GICInterface_AEOIR_INTID_Pos 0U /*!< PTIM AEOIR: INTID Position */ -#define GICInterface_AEOIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AEOIR_INTID_Pos*/) /*!< PTIM AEOIR: INTID Mask */ -#define GICInterface_AEOIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AEOIR_INTID_Pos*/)) & GICInterface_AEOIR_INTID_Msk) - -/* GICInterface AHPPIR Register */ -#define GICInterface_AHPPIR_INTID_Pos 0U /*!< PTIM AHPPIR: INTID Position */ -#define GICInterface_AHPPIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AHPPIR_INTID_Pos*/) /*!< PTIM AHPPIR: INTID Mask */ -#define GICInterface_AHPPIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AHPPIR_INTID_Pos*/)) & GICInterface_AHPPIR_INTID_Msk) - -/* GICInterface STATUSR Register */ -#define GICInterface_STATUSR_RRD_Pos 0U /*!< GICInterface STATUSR: RRD Position */ -#define GICInterface_STATUSR_RRD_Msk (0x1U /*<< GICInterface_STATUSR_RRD_Pos*/) /*!< GICInterface STATUSR: RRD Mask */ -#define GICInterface_STATUSR_RRD(x) (((uint32_t)(((uint32_t)(x)) /*<< 
GICInterface_STATUSR_RRD_Pos*/)) & GICInterface_STATUSR_RRD_Msk) - -#define GICInterface_STATUSR_WRD_Pos 1U /*!< GICInterface STATUSR: WRD Position */ -#define GICInterface_STATUSR_WRD_Msk (0x1U << GICInterface_STATUSR_WRD_Pos) /*!< GICInterface STATUSR: WRD Mask */ -#define GICInterface_STATUSR_WRD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_WRD_Pos)) & GICInterface_STATUSR_WRD_Msk) - -#define GICInterface_STATUSR_RWOD_Pos 2U /*!< GICInterface STATUSR: RWOD Position */ -#define GICInterface_STATUSR_RWOD_Msk (0x1U << GICInterface_STATUSR_RWOD_Pos) /*!< GICInterface STATUSR: RWOD Mask */ -#define GICInterface_STATUSR_RWOD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_RWOD_Pos)) & GICInterface_STATUSR_RWOD_Msk) - -#define GICInterface_STATUSR_WROD_Pos 3U /*!< GICInterface STATUSR: WROD Position */ -#define GICInterface_STATUSR_WROD_Msk (0x1U << GICInterface_STATUSR_WROD_Pos) /*!< GICInterface STATUSR: WROD Mask */ -#define GICInterface_STATUSR_WROD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_WROD_Pos)) & GICInterface_STATUSR_WROD_Msk) - -#define GICInterface_STATUSR_ASV_Pos 4U /*!< GICInterface STATUSR: ASV Position */ -#define GICInterface_STATUSR_ASV_Msk (0x1U << GICInterface_STATUSR_ASV_Pos) /*!< GICInterface STATUSR: ASV Mask */ -#define GICInterface_STATUSR_ASV(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_ASV_Pos)) & GICInterface_STATUSR_ASV_Msk) - -/* GICInterface IIDR Register */ -#define GICInterface_IIDR_Implementer_Pos 0U /*!< GICInterface IIDR: Implementer Position */ -#define GICInterface_IIDR_Implementer_Msk (0xFFFU /*<< GICInterface_IIDR_Implementer_Pos*/) /*!< GICInterface IIDR: Implementer Mask */ -#define GICInterface_IIDR_Implementer(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_IIDR_Implementer_Pos*/)) & GICInterface_IIDR_Implementer_Msk) - -#define GICInterface_IIDR_Revision_Pos 12U /*!< GICInterface IIDR: Revision Position */ -#define GICInterface_IIDR_Revision_Msk (0xFU << GICInterface_IIDR_Revision_Pos) /*!< GICInterface IIDR: Revision Mask */ -#define GICInterface_IIDR_Revision(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_Revision_Pos)) & GICInterface_IIDR_Revision_Msk) - -#define GICInterface_IIDR_Arch_version_Pos 16U /*!< GICInterface IIDR: Arch_version Position */ -#define GICInterface_IIDR_Arch_version_Msk (0xFU << GICInterface_IIDR_Arch_version_Pos) /*!< GICInterface IIDR: Arch_version Mask */ -#define GICInterface_IIDR_Arch_version(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_Arch_version_Pos)) & GICInterface_IIDR_Arch_version_Msk) - -#define GICInterface_IIDR_ProductID_Pos 20U /*!< GICInterface IIDR: ProductID Position */ -#define GICInterface_IIDR_ProductID_Msk (0xFFFU << GICInterface_IIDR_ProductID_Pos) /*!< GICInterface IIDR: ProductID Mask */ -#define GICInterface_IIDR_ProductID(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_ProductID_Pos)) & GICInterface_IIDR_ProductID_Msk) - -/* GICInterface DIR Register */ -#define GICInterface_DIR_INTID_Pos 0U /*!< PTIM DIR: INTID Position */ -#define GICInterface_DIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_DIR_INTID_Pos*/) /*!< PTIM DIR: INTID Mask */ -#define GICInterface_DIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_DIR_INTID_Pos*/)) & GICInterface_DIR_INTID_Msk) -#endif /* (__GIC_PRESENT == 1U) || defined(DOXYGEN) */ - -#if (__TIM_PRESENT == 1U) || defined(DOXYGEN) -#if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) -/** \brief Structure type to access the Private Timer -*/ -typedef struct -{ - __IOM uint32_t LOAD; 
//!< \brief Offset: 0x000 (R/W) Private Timer Load Register - __IOM uint32_t COUNTER; //!< \brief Offset: 0x004 (R/W) Private Timer Counter Register - __IOM uint32_t CONTROL; //!< \brief Offset: 0x008 (R/W) Private Timer Control Register - __IOM uint32_t ISR; //!< \brief Offset: 0x00C (R/W) Private Timer Interrupt Status Register - RESERVED(0[4], uint32_t) - __IOM uint32_t WLOAD; //!< \brief Offset: 0x020 (R/W) Watchdog Load Register - __IOM uint32_t WCOUNTER; //!< \brief Offset: 0x024 (R/W) Watchdog Counter Register - __IOM uint32_t WCONTROL; //!< \brief Offset: 0x028 (R/W) Watchdog Control Register - __IOM uint32_t WISR; //!< \brief Offset: 0x02C (R/W) Watchdog Interrupt Status Register - __IOM uint32_t WRESET; //!< \brief Offset: 0x030 (R/W) Watchdog Reset Status Register - __OM uint32_t WDISABLE; //!< \brief Offset: 0x034 ( /W) Watchdog Disable Register -} Timer_Type; -#define PTIM ((Timer_Type *) TIMER_BASE ) /*!< \brief Timer register struct */ - -/* PTIM Control Register */ -#define PTIM_CONTROL_Enable_Pos 0U /*!< PTIM CONTROL: Enable Position */ -#define PTIM_CONTROL_Enable_Msk (0x1U /*<< PTIM_CONTROL_Enable_Pos*/) /*!< PTIM CONTROL: Enable Mask */ -#define PTIM_CONTROL_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_CONTROL_Enable_Pos*/)) & PTIM_CONTROL_Enable_Msk) - -#define PTIM_CONTROL_AutoReload_Pos 1U /*!< PTIM CONTROL: Auto Reload Position */ -#define PTIM_CONTROL_AutoReload_Msk (0x1U << PTIM_CONTROL_AutoReload_Pos) /*!< PTIM CONTROL: Auto Reload Mask */ -#define PTIM_CONTROL_AutoReload(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_AutoReload_Pos)) & PTIM_CONTROL_AutoReload_Msk) - -#define PTIM_CONTROL_IRQenable_Pos 2U /*!< PTIM CONTROL: IRQ Enabel Position */ -#define PTIM_CONTROL_IRQenable_Msk (0x1U << PTIM_CONTROL_IRQenable_Pos) /*!< PTIM CONTROL: IRQ Enabel Mask */ -#define PTIM_CONTROL_IRQenable(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_IRQenable_Pos)) & PTIM_CONTROL_IRQenable_Msk) - -#define PTIM_CONTROL_Prescaler_Pos 8U /*!< PTIM CONTROL: Prescaler Position */ -#define PTIM_CONTROL_Prescaler_Msk (0xFFU << PTIM_CONTROL_Prescaler_Pos) /*!< PTIM CONTROL: Prescaler Mask */ -#define PTIM_CONTROL_Prescaler(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_Prescaler_Pos)) & PTIM_CONTROL_Prescaler_Msk) - -/* WCONTROL Watchdog Control Register */ -#define PTIM_WCONTROL_Enable_Pos 0U /*!< PTIM WCONTROL: Enable Position */ -#define PTIM_WCONTROL_Enable_Msk (0x1U /*<< PTIM_WCONTROL_Enable_Pos*/) /*!< PTIM WCONTROL: Enable Mask */ -#define PTIM_WCONTROL_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WCONTROL_Enable_Pos*/)) & PTIM_WCONTROL_Enable_Msk) - -#define PTIM_WCONTROL_AutoReload_Pos 1U /*!< PTIM WCONTROL: Auto Reload Position */ -#define PTIM_WCONTROL_AutoReload_Msk (0x1U << PTIM_WCONTROL_AutoReload_Pos) /*!< PTIM WCONTROL: Auto Reload Mask */ -#define PTIM_WCONTROL_AutoReload(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_AutoReload_Pos)) & PTIM_WCONTROL_AutoReload_Msk) - -#define PTIM_WCONTROL_IRQenable_Pos 2U /*!< PTIM WCONTROL: IRQ Enable Position */ -#define PTIM_WCONTROL_IRQenable_Msk (0x1U << PTIM_WCONTROL_IRQenable_Pos) /*!< PTIM WCONTROL: IRQ Enable Mask */ -#define PTIM_WCONTROL_IRQenable(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_IRQenable_Pos)) & PTIM_WCONTROL_IRQenable_Msk) - -#define PTIM_WCONTROL_Mode_Pos 3U /*!< PTIM WCONTROL: Watchdog Mode Position */ -#define PTIM_WCONTROL_Mode_Msk (0x1U << PTIM_WCONTROL_Mode_Pos) /*!< PTIM WCONTROL: Watchdog Mode Mask */ -#define PTIM_WCONTROL_Mode(x) (((uint32_t)(((uint32_t)(x)) << 
PTIM_WCONTROL_Mode_Pos)) & PTIM_WCONTROL_Mode_Msk) - -#define PTIM_WCONTROL_Presacler_Pos 8U /*!< PTIM WCONTROL: Prescaler Position */ -#define PTIM_WCONTROL_Presacler_Msk (0xFFU << PTIM_WCONTROL_Presacler_Pos) /*!< PTIM WCONTROL: Prescaler Mask */ -#define PTIM_WCONTROL_Presacler(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_Presacler_Pos)) & PTIM_WCONTROL_Presacler_Msk) - -/* WISR Watchdog Interrupt Status Register */ -#define PTIM_WISR_EventFlag_Pos 0U /*!< PTIM WISR: Event Flag Position */ -#define PTIM_WISR_EventFlag_Msk (0x1U /*<< PTIM_WISR_EventFlag_Pos*/) /*!< PTIM WISR: Event Flag Mask */ -#define PTIM_WISR_EventFlag(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WISR_EventFlag_Pos*/)) & PTIM_WISR_EventFlag_Msk) - -/* WRESET Watchdog Reset Status */ -#define PTIM_WRESET_ResetFlag_Pos 0U /*!< PTIM WRESET: Reset Flag Position */ -#define PTIM_WRESET_ResetFlag_Msk (0x1U /*<< PTIM_WRESET_ResetFlag_Pos*/) /*!< PTIM WRESET: Reset Flag Mask */ -#define PTIM_WRESET_ResetFlag(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WRESET_ResetFlag_Pos*/)) & PTIM_WRESET_ResetFlag_Msk) - -#endif /* ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) */ -#endif /* (__TIM_PRESENT == 1U) || defined(DOXYGEN) */ - - /******************************************************************************* - * Hardware Abstraction Layer - Core Function Interface contains: - - L1 Cache Functions - - L2C-310 Cache Controller Functions - - PL1 Timer Functions - - GIC Functions - - MMU Functions - ******************************************************************************/ - -/* ########################## L1 Cache functions ################################# */ - -/** \brief Enable Caches by setting I and C bits in SCTLR register. -*/ -__STATIC_FORCEINLINE void L1C_EnableCaches(void) { - __set_SCTLR( __get_SCTLR() | SCTLR_I_Msk | SCTLR_C_Msk); - __ISB(); -} - -/** \brief Disable Caches by clearing I and C bits in SCTLR register. -*/ -__STATIC_FORCEINLINE void L1C_DisableCaches(void) { - __set_SCTLR( __get_SCTLR() & (~SCTLR_I_Msk) & (~SCTLR_C_Msk)); - __ISB(); -} - -/** \brief Enable Branch Prediction by setting Z bit in SCTLR register. -*/ -__STATIC_FORCEINLINE void L1C_EnableBTAC(void) { - __set_SCTLR( __get_SCTLR() | SCTLR_Z_Msk); - __ISB(); -} - -/** \brief Disable Branch Prediction by clearing Z bit in SCTLR register. -*/ -__STATIC_FORCEINLINE void L1C_DisableBTAC(void) { - __set_SCTLR( __get_SCTLR() & (~SCTLR_Z_Msk)); - __ISB(); -} - -/** \brief Invalidate entire branch predictor array -*/ -__STATIC_FORCEINLINE void L1C_InvalidateBTAC(void) { - __set_BPIALL(0); - __DSB(); //ensure completion of the invalidation - __ISB(); //ensure instruction fetch path sees new state -} - -/** \brief Clean instruction cache line by address. -* \param [in] va Pointer to instructions to clear the cache for. -*/ -__STATIC_FORCEINLINE void L1C_InvalidateICacheMVA(void *va) { - __set_ICIMVAC((uint32_t)va); - __DSB(); //ensure completion of the invalidation - __ISB(); //ensure instruction fetch path sees new I cache state -} - -/** \brief Invalidate the whole instruction cache -*/ -__STATIC_FORCEINLINE void L1C_InvalidateICacheAll(void) { - __set_ICIALLU(0); - __DSB(); //ensure completion of the invalidation - __ISB(); //ensure instruction fetch path sees new I cache state -} - -/** \brief Clean data cache line by address. -* \param [in] va Pointer to data to clear the cache for. 
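
A usage sketch for the L1 instruction-side helpers defined above (illustrative only; it assumes earlier boot code has already invalidated the data cache, since L1C_EnableCaches sets SCTLR.C as well as SCTLR.I):

/* Illustrative sketch only: typical instruction-side cache bring-up. */
__STATIC_INLINE void example_enable_l1_instruction_path(void)
{
  L1C_InvalidateICacheAll();   /* start from an invalidated instruction cache */
  L1C_InvalidateBTAC();        /* and an empty branch predictor               */
  L1C_EnableBTAC();            /* SCTLR.Z: enable branch prediction           */
  L1C_EnableCaches();          /* SCTLR.I and SCTLR.C: enable the caches      */
}
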
-*/ -__STATIC_FORCEINLINE void L1C_CleanDCacheMVA(void *va) { - __set_DCCMVAC((uint32_t)va); - __DMB(); //ensure the ordering of data cache maintenance operations and their effects -} - -/** \brief Invalidate data cache line by address. -* \param [in] va Pointer to data to invalidate the cache for. -*/ -__STATIC_FORCEINLINE void L1C_InvalidateDCacheMVA(void *va) { - __set_DCIMVAC((uint32_t)va); - __DMB(); //ensure the ordering of data cache maintenance operations and their effects -} - -/** \brief Clean and Invalidate data cache by address. -* \param [in] va Pointer to data to invalidate the cache for. -*/ -__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheMVA(void *va) { - __set_DCCIMVAC((uint32_t)va); - __DMB(); //ensure the ordering of data cache maintenance operations and their effects -} - -/** \brief Calculate log2 rounded up -* - log(0) => 0 -* - log(1) => 0 -* - log(2) => 1 -* - log(3) => 2 -* - log(4) => 2 -* - log(5) => 3 -* : : -* - log(16) => 4 -* - log(32) => 5 -* : : -* \param [in] n input value parameter -* \return log2(n) -*/ -__STATIC_FORCEINLINE uint8_t __log2_up(uint32_t n) -{ - if (n < 2U) { - return 0U; - } - uint8_t log = 0U; - uint32_t t = n; - while(t > 1U) - { - log++; - t >>= 1U; - } - if (n & 1U) { log++; } - return log; -} - -/** \brief Apply cache maintenance to given cache level. -* \param [in] level cache level to be maintained -* \param [in] maint 0 - invalidate, 1 - clean, otherwise - invalidate and clean -*/ -__STATIC_FORCEINLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint) -{ - uint32_t Dummy; - uint32_t ccsidr; - uint32_t num_sets; - uint32_t num_ways; - uint32_t shift_way; - uint32_t log2_linesize; - int32_t log2_num_ways; - - Dummy = level << 1U; - /* set csselr, select ccsidr register */ - __set_CSSELR(Dummy); - /* get current ccsidr register */ - ccsidr = __get_CCSIDR(); - num_sets = ((ccsidr & 0x0FFFE000U) >> 13U) + 1U; - num_ways = ((ccsidr & 0x00001FF8U) >> 3U) + 1U; - log2_linesize = (ccsidr & 0x00000007U) + 2U + 2U; - log2_num_ways = __log2_up(num_ways); - if ((log2_num_ways < 0) || (log2_num_ways > 32)) { - return; // FATAL ERROR - } - shift_way = 32U - (uint32_t)log2_num_ways; - for(int32_t way = num_ways-1; way >= 0; way--) - { - for(int32_t set = num_sets-1; set >= 0; set--) - { - Dummy = (level << 1U) | (((uint32_t)set) << log2_linesize) | (((uint32_t)way) << shift_way); - switch (maint) - { - case 0U: __set_DCISW(Dummy); break; - case 1U: __set_DCCSW(Dummy); break; - default: __set_DCCISW(Dummy); break; - } - } - } - __DMB(); -} - -/** \brief Clean and Invalidate the entire data or unified cache -* Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency -* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean -*/ -__STATIC_FORCEINLINE void L1C_CleanInvalidateCache(uint32_t op) { - uint32_t clidr; - uint32_t cache_type; - clidr = __get_CLIDR(); - for(uint32_t i = 0U; i<7U; i++) - { - cache_type = (clidr >> i*3U) & 0x7UL; - if ((cache_type >= 2U) && (cache_type <= 4U)) - { - __L1C_MaintainDCacheSetWay(i, op); - } - } -} - -/** \brief Clean and Invalidate the entire data or unified cache -* Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency -* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean -* \deprecated Use generic L1C_CleanInvalidateCache instead. 
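
The per-address operations above are usually applied over a whole buffer, for example before handing it to a DMA engine. The sketch below is illustrative; the 32-byte line size is an assumption (it matches the Cortex-A5/A7/A9 L1 data caches), and portable code should derive the line size from CCSIDR instead:

/* Illustrative sketch: clean every cache line covering [addr, addr+size). */
__STATIC_INLINE void example_clean_dcache_range(void *addr, uint32_t size)
{
  uint32_t va  = (uint32_t)addr & ~31U;   /* align down to an assumed 32-byte line */
  uint32_t end = (uint32_t)addr + size;

  while (va < end) {
    L1C_CleanDCacheMVA((void *)va);       /* push dirty data out to memory         */
    va += 32U;
  }
  __DSB();                                /* complete maintenance before DMA starts */
}
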
-*/ -CMSIS_DEPRECATED -__STATIC_FORCEINLINE void __L1C_CleanInvalidateCache(uint32_t op) { - L1C_CleanInvalidateCache(op); -} - -/** \brief Invalidate the whole data cache. -*/ -__STATIC_FORCEINLINE void L1C_InvalidateDCacheAll(void) { - L1C_CleanInvalidateCache(0); -} - -/** \brief Clean the whole data cache. - */ -__STATIC_FORCEINLINE void L1C_CleanDCacheAll(void) { - L1C_CleanInvalidateCache(1); -} - -/** \brief Clean and invalidate the whole data cache. - */ -__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheAll(void) { - L1C_CleanInvalidateCache(2); -} - -/* ########################## L2 Cache functions ################################# */ -#if (__L2C_PRESENT == 1U) || defined(DOXYGEN) -/** \brief Cache Sync operation by writing CACHE_SYNC register. -*/ -__STATIC_INLINE void L2C_Sync(void) -{ - L2C_310->CACHE_SYNC = 0x0; -} - -/** \brief Read cache controller cache ID from CACHE_ID register. - * \return L2C_310_TypeDef::CACHE_ID - */ -__STATIC_INLINE int L2C_GetID (void) -{ - return L2C_310->CACHE_ID; -} - -/** \brief Read cache controller cache type from CACHE_TYPE register. -* \return L2C_310_TypeDef::CACHE_TYPE -*/ -__STATIC_INLINE int L2C_GetType (void) -{ - return L2C_310->CACHE_TYPE; -} - -/** \brief Invalidate all cache by way -*/ -__STATIC_INLINE void L2C_InvAllByWay (void) -{ - unsigned int assoc; - - if (L2C_310->AUX_CNT & (1U << 16U)) { - assoc = 16U; - } else { - assoc = 8U; - } - - L2C_310->INV_WAY = (1U << assoc) - 1U; - while(L2C_310->INV_WAY & ((1U << assoc) - 1U)); //poll invalidate - - L2C_Sync(); -} - -/** \brief Clean and Invalidate all cache by way -*/ -__STATIC_INLINE void L2C_CleanInvAllByWay (void) -{ - unsigned int assoc; - - if (L2C_310->AUX_CNT & (1U << 16U)) { - assoc = 16U; - } else { - assoc = 8U; - } - - L2C_310->CLEAN_INV_WAY = (1U << assoc) - 1U; - while(L2C_310->CLEAN_INV_WAY & ((1U << assoc) - 1U)); //poll invalidate - - L2C_Sync(); -} - -/** \brief Enable Level 2 Cache -*/ -__STATIC_INLINE void L2C_Enable(void) -{ - L2C_310->CONTROL = 0; - L2C_310->INTERRUPT_CLEAR = 0x000001FFuL; - L2C_310->DEBUG_CONTROL = 0; - L2C_310->DATA_LOCK_0_WAY = 0; - L2C_310->CACHE_SYNC = 0; - L2C_310->CONTROL = 0x01; - L2C_Sync(); -} - -/** \brief Disable Level 2 Cache -*/ -__STATIC_INLINE void L2C_Disable(void) -{ - L2C_310->CONTROL = 0x00; - L2C_Sync(); -} - -/** \brief Invalidate cache by physical address -* \param [in] pa Pointer to data to invalidate cache for. -*/ -__STATIC_INLINE void L2C_InvPa (void *pa) -{ - L2C_310->INV_LINE_PA = (unsigned int)pa; - L2C_Sync(); -} - -/** \brief Clean cache by physical address -* \param [in] pa Pointer to data to invalidate cache for. -*/ -__STATIC_INLINE void L2C_CleanPa (void *pa) -{ - L2C_310->CLEAN_LINE_PA = (unsigned int)pa; - L2C_Sync(); -} - -/** \brief Clean and invalidate cache by physical address -* \param [in] pa Pointer to data to invalidate cache for. -*/ -__STATIC_INLINE void L2C_CleanInvPa (void *pa) -{ - L2C_310->CLEAN_INV_LINE_PA = (unsigned int)pa; - L2C_Sync(); -} -#endif - -/* ########################## GIC functions ###################################### */ -#if (__GIC_PRESENT == 1U) || defined(DOXYGEN) - -/** \brief Enable the interrupt distributor using the GIC's CTLR register. -*/ -__STATIC_INLINE void GIC_EnableDistributor(void) -{ - GICDistributor->CTLR |= 1U; -} - -/** \brief Disable the interrupt distributor using the GIC's CTLR register. -*/ -__STATIC_INLINE void GIC_DisableDistributor(void) -{ - GICDistributor->CTLR &=~1U; -} - -/** \brief Read the GIC's TYPER register. 
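
A hedged sketch of one common L2C-310 bring-up order using the helpers above (illustrative only, guarded the same way as the driver functions; it assumes the controller is still disabled, as it is out of reset):

#if (__L2C_PRESENT == 1U)
/* Illustrative sketch: invalidate all ways while the L2 is disabled, then enable it. */
__STATIC_INLINE void example_enable_l2_cache(void)
{
  L2C_InvAllByWay();   /* invalidate every way before the first enable                 */
  L2C_Enable();        /* clear pending interrupts, unlock ways, set CONTROL enable bit */
}
#endif
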
-* \return GICDistributor_Type::TYPER -*/ -__STATIC_INLINE uint32_t GIC_DistributorInfo(void) -{ - return (GICDistributor->TYPER); -} - -/** \brief Reads the GIC's IIDR register. -* \return GICDistributor_Type::IIDR -*/ -__STATIC_INLINE uint32_t GIC_DistributorImplementer(void) -{ - return (GICDistributor->IIDR); -} - -/** \brief Sets the GIC's ITARGETSR register for the given interrupt. -* \param [in] IRQn Interrupt to be configured. -* \param [in] cpu_target CPU interfaces to assign this interrupt to. -*/ -__STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint32_t cpu_target) -{ - uint32_t mask = GICDistributor->ITARGETSR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U)); - GICDistributor->ITARGETSR[IRQn / 4U] = mask | ((cpu_target & 0xFFUL) << ((IRQn % 4U) * 8U)); -} - -/** \brief Read the GIC's ITARGETSR register. -* \param [in] IRQn Interrupt to acquire the configuration for. -* \return GICDistributor_Type::ITARGETSR -*/ -__STATIC_INLINE uint32_t GIC_GetTarget(IRQn_Type IRQn) -{ - return (GICDistributor->ITARGETSR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; -} - -/** \brief Enable the CPU's interrupt interface. -*/ -__STATIC_INLINE void GIC_EnableInterface(void) -{ - GICInterface->CTLR |= 1U; //enable interface -} - -/** \brief Disable the CPU's interrupt interface. -*/ -__STATIC_INLINE void GIC_DisableInterface(void) -{ - GICInterface->CTLR &=~1U; //disable distributor -} - -/** \brief Read the CPU's IAR register. -* \return GICInterface_Type::IAR -*/ -__STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void) -{ - return (IRQn_Type)(GICInterface->IAR); -} - -/** \brief Writes the given interrupt number to the CPU's EOIR register. -* \param [in] IRQn The interrupt to be signaled as finished. -*/ -__STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn) -{ - GICInterface->EOIR = IRQn; -} - -/** \brief Enables the given interrupt using GIC's ISENABLER register. -* \param [in] IRQn The interrupt to be enabled. -*/ -__STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn) -{ - GICDistributor->ISENABLER[IRQn / 32U] = 1U << (IRQn % 32U); -} - -/** \brief Get interrupt enable status using GIC's ISENABLER register. -* \param [in] IRQn The interrupt to be queried. -* \return 0 - interrupt is not enabled, 1 - interrupt is enabled. -*/ -__STATIC_INLINE uint32_t GIC_GetEnableIRQ(IRQn_Type IRQn) -{ - return (GICDistributor->ISENABLER[IRQn / 32U] >> (IRQn % 32U)) & 1UL; -} - -/** \brief Disables the given interrupt using GIC's ICENABLER register. -* \param [in] IRQn The interrupt to be disabled. -*/ -__STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn) -{ - GICDistributor->ICENABLER[IRQn / 32U] = 1U << (IRQn % 32U); -} - -/** \brief Get interrupt pending status from GIC's ISPENDR register. -* \param [in] IRQn The interrupt to be queried. -* \return 0 - interrupt is not pending, 1 - interrupt is pendig. -*/ -__STATIC_INLINE uint32_t GIC_GetPendingIRQ(IRQn_Type IRQn) -{ - uint32_t pend; - - if (IRQn >= 16U) { - pend = (GICDistributor->ISPENDR[IRQn / 32U] >> (IRQn % 32U)) & 1UL; - } else { - // INTID 0-15 Software Generated Interrupt - pend = (GICDistributor->SPENDSGIR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; - // No CPU identification offered - if (pend != 0U) { - pend = 1U; - } else { - pend = 0U; - } - } - - return (pend); -} - -/** \brief Sets the given interrupt as pending using GIC's ISPENDR register. -* \param [in] IRQn The interrupt to be enabled. 
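
A sketch of the usual acknowledge/handle/end sequence built from the accessors above; the handler table and its size are placeholders for this example, not part of CMSIS:

/* Illustrative sketch only: minimal IRQ dispatch using IAR/EOIR. */
extern void (*example_irq_table[1020])(void);    /* hypothetical handler table   */

__STATIC_INLINE void example_irq_dispatch(void)
{
  IRQn_Type irq = GIC_AcknowledgePending();      /* read IAR: pending INTID      */
  if ((uint32_t)irq < 1020U) {                   /* 1020..1023 are special IDs   */
    example_irq_table[irq]();                    /* call the registered handler  */
  }
  GIC_EndInterrupt(irq);                         /* write EOIR: priority drop    */
}
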
-*/ -__STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn) -{ - if (IRQn >= 16U) { - GICDistributor->ISPENDR[IRQn / 32U] = 1U << (IRQn % 32U); - } else { - // INTID 0-15 Software Generated Interrupt - // Forward the interrupt to the CPU interface that requested it - GICDistributor->SGIR = (IRQn | 0x02000000U); - } -} - -/** \brief Clears the given interrupt from being pending using GIC's ICPENDR register. -* \param [in] IRQn The interrupt to be enabled. -*/ -__STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn) -{ - if (IRQn >= 16U) { - GICDistributor->ICPENDR[IRQn / 32U] = 1U << (IRQn % 32U); - } else { - // INTID 0-15 Software Generated Interrupt - GICDistributor->CPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U); - } -} - -/** \brief Sets the interrupt configuration using GIC's ICFGR register. -* \param [in] IRQn The interrupt to be configured. -* \param [in] int_config Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1) -* Bit 1: 0 - level sensitive, 1 - edge triggered -*/ -__STATIC_INLINE void GIC_SetConfiguration(IRQn_Type IRQn, uint32_t int_config) -{ - uint32_t icfgr = GICDistributor->ICFGR[IRQn / 16U]; /* read current register content */ - uint32_t shift = (IRQn % 16U) << 1U; /* calculate shift value */ - - int_config &= 3U; /* only 2 bits are valid */ - icfgr &= (~(3U << shift)); /* clear bits to change */ - icfgr |= ( int_config << shift); /* set new configuration */ - - GICDistributor->ICFGR[IRQn / 16U] = icfgr; /* write new register content */ -} - -/** \brief Get the interrupt configuration from the GIC's ICFGR register. -* \param [in] IRQn Interrupt to acquire the configuration for. -* \return Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1) -* Bit 1: 0 - level sensitive, 1 - edge triggered -*/ -__STATIC_INLINE uint32_t GIC_GetConfiguration(IRQn_Type IRQn) -{ - return (GICDistributor->ICFGR[IRQn / 16U] >> ((IRQn % 16U) >> 1U)); -} - -/** \brief Set the priority for the given interrupt in the GIC's IPRIORITYR register. -* \param [in] IRQn The interrupt to be configured. -* \param [in] priority The priority for the interrupt, lower values denote higher priorities. -*/ -__STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority) -{ - uint32_t mask = GICDistributor->IPRIORITYR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U)); - GICDistributor->IPRIORITYR[IRQn / 4U] = mask | ((priority & 0xFFUL) << ((IRQn % 4U) * 8U)); -} - -/** \brief Read the current interrupt priority from GIC's IPRIORITYR register. -* \param [in] IRQn The interrupt to be queried. -*/ -__STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn) -{ - return (GICDistributor->IPRIORITYR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; -} - -/** \brief Set the interrupt priority mask using CPU's PMR register. -* \param [in] priority Priority mask to be set. -*/ -__STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority) -{ - GICInterface->PMR = priority & 0xFFUL; //set priority mask -} - -/** \brief Read the current interrupt priority mask from CPU's PMR register. -* \result GICInterface_Type::PMR -*/ -__STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void) -{ - return GICInterface->PMR; -} - -/** \brief Configures the group priority and subpriority split point using CPU's BPR register. -* \param [in] binary_point Amount of bits used as subpriority. 
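
A sketch of bringing up a single shared peripheral interrupt (SPI) with the configuration helpers above; the interrupt number 42 and priority 0xA0 are arbitrary example values:

/* Illustrative sketch only: configure and enable one SPI. */
__STATIC_INLINE void example_setup_spi(void)
{
  IRQn_Type irq = (IRQn_Type)42;   /* hypothetical device interrupt number   */

  GIC_SetConfiguration(irq, 2U);   /* bit 1 = 1: edge triggered              */
  GIC_SetPriority(irq, 0xA0U);     /* lower value means higher priority      */
  GIC_SetTarget(irq, 1U);          /* route to CPU interface 0               */
  GIC_EnableIRQ(irq);              /* set-enable bit in ISENABLER            */
}
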
-*/ -__STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point) -{ - GICInterface->BPR = binary_point & 7U; //set binary point -} - -/** \brief Read the current group priority and subpriority split point from CPU's BPR register. -* \return GICInterface_Type::BPR -*/ -__STATIC_INLINE uint32_t GIC_GetBinaryPoint(void) -{ - return GICInterface->BPR; -} - -/** \brief Get the status for a given interrupt. -* \param [in] IRQn The interrupt to get status for. -* \return 0 - not pending/active, 1 - pending, 2 - active, 3 - pending and active -*/ -__STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn) -{ - uint32_t pending, active; - - active = ((GICDistributor->ISACTIVER[IRQn / 32U]) >> (IRQn % 32U)) & 1UL; - pending = ((GICDistributor->ISPENDR[IRQn / 32U]) >> (IRQn % 32U)) & 1UL; - - return ((active<<1U) | pending); -} - -/** \brief Generate a software interrupt using GIC's SGIR register. -* \param [in] IRQn Software interrupt to be generated. -* \param [in] target_list List of CPUs the software interrupt should be forwarded to. -* \param [in] filter_list Filter to be applied to determine interrupt receivers. -*/ -__STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint32_t target_list, uint32_t filter_list) -{ - GICDistributor->SGIR = ((filter_list & 3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (IRQn & 0x0FUL); -} - -/** \brief Get the interrupt number of the highest interrupt pending from CPU's HPPIR register. -* \return GICInterface_Type::HPPIR -*/ -__STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void) -{ - return GICInterface->HPPIR; -} - -/** \brief Provides information about the implementer and revision of the CPU interface. -* \return GICInterface_Type::IIDR -*/ -__STATIC_INLINE uint32_t GIC_GetInterfaceId(void) -{ - return GICInterface->IIDR; -} - -/** \brief Set the interrupt group from the GIC's IGROUPR register. -* \param [in] IRQn The interrupt to be queried. -* \param [in] group Interrupt group number: 0 - Group 0, 1 - Group 1 -*/ -__STATIC_INLINE void GIC_SetGroup(IRQn_Type IRQn, uint32_t group) -{ - uint32_t igroupr = GICDistributor->IGROUPR[IRQn / 32U]; - uint32_t shift = (IRQn % 32U); - - igroupr &= (~(1U << shift)); - igroupr |= ( (group & 1U) << shift); - - GICDistributor->IGROUPR[IRQn / 32U] = igroupr; -} -#define GIC_SetSecurity GIC_SetGroup - -/** \brief Get the interrupt group from the GIC's IGROUPR register. -* \param [in] IRQn The interrupt to be queried. -* \return 0 - Group 0, 1 - Group 1 -*/ -__STATIC_INLINE uint32_t GIC_GetGroup(IRQn_Type IRQn) -{ - return (GICDistributor->IGROUPR[IRQn / 32U] >> (IRQn % 32U)) & 1UL; -} -#define GIC_GetSecurity GIC_GetGroup - -/** \brief Initialize the interrupt distributor. -*/ -__STATIC_INLINE void GIC_DistInit(void) -{ - uint32_t i; - uint32_t num_irq = 0U; - uint32_t priority_field; - - //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0, - //configuring all of the interrupts as Secure. - - //Disable interrupt forwarding - GIC_DisableDistributor(); - //Get the maximum number of interrupts that the GIC supports - num_irq = 32U * ((GIC_DistributorInfo() & 0x1FU) + 1U); - - /* Priority level is implementation defined. 
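The SGI and CPU-interface helpers above are normally used together. The following sketch raises SGI 0 on the requesting core and services it by hand; it assumes the GIC has already been initialised, the polling style is only meant to show the IAR/EOIR handshake (real systems take SGIs through the IRQ exception), and SGI0_SelfTest is an illustrative name.

/* Assumes the CMSIS core header (and its IRQn_Type) is already included. */
void SGI0_SelfTest(void)
{
  /* TargetListFilter = 2: forward the SGI only to the requesting CPU (GICv2 SGIR encoding) */
  GIC_SendSGI((IRQn_Type)0, 0U, 2U);

  /* Acknowledge the highest-priority pending interrupt and signal completion */
  IRQn_Type irq = GIC_AcknowledgePending();
  if (irq == (IRQn_Type)0) {
    /* ...handle the software-generated interrupt... */
  }
  GIC_EndInterrupt(irq);
}
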
- To determine the number of priority bits implemented write 0xFF to an IPRIORITYR - priority field and read back the value stored.*/ - GIC_SetPriority((IRQn_Type)0U, 0xFFU); - priority_field = GIC_GetPriority((IRQn_Type)0U); - - for (i = 32U; i < num_irq; i++) - { - //Disable the SPI interrupt - GIC_DisableIRQ((IRQn_Type)i); - //Set level-sensitive (and N-N model) - GIC_SetConfiguration((IRQn_Type)i, 0U); - //Set priority - GIC_SetPriority((IRQn_Type)i, priority_field/2U); - //Set target list to CPU0 - GIC_SetTarget((IRQn_Type)i, 1U); - } - //Enable distributor - GIC_EnableDistributor(); -} - -/** \brief Initialize the CPU's interrupt interface -*/ -__STATIC_INLINE void GIC_CPUInterfaceInit(void) -{ - uint32_t i; - uint32_t priority_field; - - //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0, - //configuring all of the interrupts as Secure. - - //Disable interrupt forwarding - GIC_DisableInterface(); - - /* Priority level is implementation defined. - To determine the number of priority bits implemented write 0xFF to an IPRIORITYR - priority field and read back the value stored.*/ - GIC_SetPriority((IRQn_Type)0U, 0xFFU); - priority_field = GIC_GetPriority((IRQn_Type)0U); - - //SGI and PPI - for (i = 0U; i < 32U; i++) - { - if(i > 15U) { - //Set level-sensitive (and N-N model) for PPI - GIC_SetConfiguration((IRQn_Type)i, 0U); - } - //Disable SGI and PPI interrupts - GIC_DisableIRQ((IRQn_Type)i); - //Set priority - GIC_SetPriority((IRQn_Type)i, priority_field/2U); - } - //Enable interface - GIC_EnableInterface(); - //Set binary point to 0 - GIC_SetBinaryPoint(0U); - //Set priority mask - GIC_SetInterfacePriorityMask(0xFFU); -} - -/** \brief Initialize and enable the GIC -*/ -__STATIC_INLINE void GIC_Enable(void) -{ - GIC_DistInit(); - GIC_CPUInterfaceInit(); //per CPU -} -#endif - -/* ########################## Generic Timer functions ############################ */ -#if (__TIM_PRESENT == 1U) || defined(DOXYGEN) - -/* PL1 Physical Timer */ -#if (__CORTEX_A == 7U) || defined(DOXYGEN) - -/** \brief Physical Timer Control register */ -typedef union -{ - struct - { - uint32_t ENABLE:1; /*!< \brief bit: 0 Enables the timer. */ - uint32_t IMASK:1; /*!< \brief bit: 1 Timer output signal mask bit. */ - uint32_t ISTATUS:1; /*!< \brief bit: 2 The status of the timer. */ - RESERVED(0:29, uint32_t) - } b; /*!< \brief Structure used for bit access */ - uint32_t w; /*!< \brief Type used for word access */ -} CNTP_CTL_Type; - -/** \brief Configures the frequency the timer shall run at. -* \param [in] value The timer frequency in Hz. -*/ -__STATIC_INLINE void PL1_SetCounterFrequency(uint32_t value) -{ - __set_CNTFRQ(value); - __ISB(); -} - -/** \brief Sets the reset value of the timer. -* \param [in] value The value the timer is loaded with. -*/ -__STATIC_INLINE void PL1_SetLoadValue(uint32_t value) -{ - __set_CNTP_TVAL(value); - __ISB(); -} - -/** \brief Get the current counter value. -* \return Current counter value. -*/ -__STATIC_INLINE uint32_t PL1_GetCurrentValue(void) -{ - return(__get_CNTP_TVAL()); -} - -/** \brief Get the current physical counter value. -* \return Current physical counter value. -*/ -__STATIC_INLINE uint64_t PL1_GetCurrentPhysicalValue(void) -{ - return(__get_CNTPCT()); -} - -/** \brief Set the physical compare value. -* \param [in] value New physical timer compare value. -*/ -__STATIC_INLINE void PL1_SetPhysicalCompareValue(uint64_t value) -{ - __set_CNTP_CVAL(value); - __ISB(); -} - -/** \brief Get the physical compare value. 
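Taken together, the routines above form the usual bring-up sequence for a GICv2 peripheral interrupt. A minimal sketch, assuming a hypothetical shared peripheral interrupt UART0_IRQn (INTID 42) routed to CPU0:

#define UART0_IRQn ((IRQn_Type)42)          /* hypothetical peripheral INTID */

void UART0_InterruptSetup(void)
{
  GIC_Enable();                             /* distributor + CPU interface init */

  GIC_SetTarget(UART0_IRQn, 1U);            /* route to CPU interface 0          */
  GIC_SetConfiguration(UART0_IRQn, 0U);     /* level-sensitive                   */
  GIC_SetPriority(UART0_IRQn, 0x80U);       /* lower value = higher priority     */
  GIC_EnableIRQ(UART0_IRQn);
}
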
-* \return Physical compare value. -*/ -__STATIC_INLINE uint64_t PL1_GetPhysicalCompareValue(void) -{ - return(__get_CNTP_CVAL()); -} - -/** \brief Configure the timer by setting the control value. -* \param [in] value New timer control value. -*/ -__STATIC_INLINE void PL1_SetControl(uint32_t value) -{ - __set_CNTP_CTL(value); - __ISB(); -} - -/** \brief Get the control value. -* \return Control value. -*/ -__STATIC_INLINE uint32_t PL1_GetControl(void) -{ - return(__get_CNTP_CTL()); -} -#endif - -/* Private Timer */ -#if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) -/** \brief Set the load value to timers LOAD register. -* \param [in] value The load value to be set. -*/ -__STATIC_INLINE void PTIM_SetLoadValue(uint32_t value) -{ - PTIM->LOAD = value; -} - -/** \brief Get the load value from timers LOAD register. -* \return Timer_Type::LOAD -*/ -__STATIC_INLINE uint32_t PTIM_GetLoadValue(void) -{ - return(PTIM->LOAD); -} - -/** \brief Set current counter value from its COUNTER register. -*/ -__STATIC_INLINE void PTIM_SetCurrentValue(uint32_t value) -{ - PTIM->COUNTER = value; -} - -/** \brief Get current counter value from timers COUNTER register. -* \result Timer_Type::COUNTER -*/ -__STATIC_INLINE uint32_t PTIM_GetCurrentValue(void) -{ - return(PTIM->COUNTER); -} - -/** \brief Configure the timer using its CONTROL register. -* \param [in] value The new configuration value to be set. -*/ -__STATIC_INLINE void PTIM_SetControl(uint32_t value) -{ - PTIM->CONTROL = value; -} - -/** ref Timer_Type::CONTROL Get the current timer configuration from its CONTROL register. -* \return Timer_Type::CONTROL -*/ -__STATIC_INLINE uint32_t PTIM_GetControl(void) -{ - return(PTIM->CONTROL); -} - -/** ref Timer_Type::CONTROL Get the event flag in timers ISR register. -* \return 0 - flag is not set, 1- flag is set -*/ -__STATIC_INLINE uint32_t PTIM_GetEventFlag(void) -{ - return (PTIM->ISR & 1UL); -} - -/** ref Timer_Type::CONTROL Clears the event flag in timers ISR register. 
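The private timer accessors above map directly onto the LOAD/CONTROL/ISR registers. A minimal sketch of a periodic tick follows; the CONTROL bit positions used here (bit 0 enable, bit 1 auto-reload, bit 2 IRQ enable) are the Cortex-A5/A9 private timer layout and should be checked against the core's TRM, and PTIM_StartPeriodic/PTIM_Poll are illustrative names.

void PTIM_StartPeriodic(uint32_t reload)
{
  PTIM_SetLoadValue(reload);                     /* value reloaded on each underflow */
  PTIM_SetControl((1U << 2) | (1U << 1) | 1U);   /* IRQ enable | auto-reload | enable */
}

void PTIM_Poll(void)
{
  if (PTIM_GetEventFlag() != 0U) {
    PTIM_ClearEventFlag();                       /* write 1 to ISR to clear the event */
    /* ...periodic work... */
  }
}
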
-*/ -__STATIC_INLINE void PTIM_ClearEventFlag(void) -{ - PTIM->ISR = 1; -} -#endif -#endif - -/* ########################## MMU functions ###################################### */ - -#define SECTION_DESCRIPTOR (0x2) -#define SECTION_MASK (0xFFFFFFFC) - -#define SECTION_TEXCB_MASK (0xFFFF8FF3) -#define SECTION_B_SHIFT (2) -#define SECTION_C_SHIFT (3) -#define SECTION_TEX0_SHIFT (12) -#define SECTION_TEX1_SHIFT (13) -#define SECTION_TEX2_SHIFT (14) - -#define SECTION_XN_MASK (0xFFFFFFEF) -#define SECTION_XN_SHIFT (4) - -#define SECTION_DOMAIN_MASK (0xFFFFFE1F) -#define SECTION_DOMAIN_SHIFT (5) - -#define SECTION_P_MASK (0xFFFFFDFF) -#define SECTION_P_SHIFT (9) - -#define SECTION_AP_MASK (0xFFFF73FF) -#define SECTION_AP_SHIFT (10) -#define SECTION_AP2_SHIFT (15) - -#define SECTION_S_MASK (0xFFFEFFFF) -#define SECTION_S_SHIFT (16) - -#define SECTION_NG_MASK (0xFFFDFFFF) -#define SECTION_NG_SHIFT (17) - -#define SECTION_NS_MASK (0xFFF7FFFF) -#define SECTION_NS_SHIFT (19) - -#define PAGE_L1_DESCRIPTOR (0x1) -#define PAGE_L1_MASK (0xFFFFFFFC) - -#define PAGE_L2_4K_DESC (0x2) -#define PAGE_L2_4K_MASK (0xFFFFFFFD) - -#define PAGE_L2_64K_DESC (0x1) -#define PAGE_L2_64K_MASK (0xFFFFFFFC) - -#define PAGE_4K_TEXCB_MASK (0xFFFFFE33) -#define PAGE_4K_B_SHIFT (2) -#define PAGE_4K_C_SHIFT (3) -#define PAGE_4K_TEX0_SHIFT (6) -#define PAGE_4K_TEX1_SHIFT (7) -#define PAGE_4K_TEX2_SHIFT (8) - -#define PAGE_64K_TEXCB_MASK (0xFFFF8FF3) -#define PAGE_64K_B_SHIFT (2) -#define PAGE_64K_C_SHIFT (3) -#define PAGE_64K_TEX0_SHIFT (12) -#define PAGE_64K_TEX1_SHIFT (13) -#define PAGE_64K_TEX2_SHIFT (14) - -#define PAGE_TEXCB_MASK (0xFFFF8FF3) -#define PAGE_B_SHIFT (2) -#define PAGE_C_SHIFT (3) -#define PAGE_TEX_SHIFT (12) - -#define PAGE_XN_4K_MASK (0xFFFFFFFE) -#define PAGE_XN_4K_SHIFT (0) -#define PAGE_XN_64K_MASK (0xFFFF7FFF) -#define PAGE_XN_64K_SHIFT (15) - -#define PAGE_DOMAIN_MASK (0xFFFFFE1F) -#define PAGE_DOMAIN_SHIFT (5) - -#define PAGE_P_MASK (0xFFFFFDFF) -#define PAGE_P_SHIFT (9) - -#define PAGE_AP_MASK (0xFFFFFDCF) -#define PAGE_AP_SHIFT (4) -#define PAGE_AP2_SHIFT (9) - -#define PAGE_S_MASK (0xFFFFFBFF) -#define PAGE_S_SHIFT (10) - -#define PAGE_NG_MASK (0xFFFFF7FF) -#define PAGE_NG_SHIFT (11) - -#define PAGE_NS_MASK (0xFFFFFFF7) -#define PAGE_NS_SHIFT (3) - -#define OFFSET_1M (0x00100000) -#define OFFSET_64K (0x00010000) -#define OFFSET_4K (0x00001000) - -#define DESCRIPTOR_FAULT (0x00000000) - -/* Attributes enumerations */ - -/* Region size attributes */ -typedef enum -{ - SECTION, - PAGE_4k, - PAGE_64k, -} mmu_region_size_Type; - -/* Region type attributes */ -typedef enum -{ - NORMAL, - DEVICE, - SHARED_DEVICE, - NON_SHARED_DEVICE, - STRONGLY_ORDERED -} mmu_memory_Type; - -/* Region cacheability attributes */ -typedef enum -{ - NON_CACHEABLE, - WB_WA, - WT, - WB_NO_WA, -} mmu_cacheability_Type; - -/* Region parity check attributes */ -typedef enum -{ - ECC_DISABLED, - ECC_ENABLED, -} mmu_ecc_check_Type; - -/* Region execution attributes */ -typedef enum -{ - EXECUTE, - NON_EXECUTE, -} mmu_execute_Type; - -/* Region global attributes */ -typedef enum -{ - GLOBAL, - NON_GLOBAL, -} mmu_global_Type; - -/* Region shareability attributes */ -typedef enum -{ - NON_SHARED, - SHARED, -} mmu_shared_Type; - -/* Region security attributes */ -typedef enum -{ - SECURE, - NON_SECURE, -} mmu_secure_Type; - -/* Region access attributes */ -typedef enum -{ - NO_ACCESS, - RW, - READ, -} mmu_access_Type; - -/* Memory Region definition */ -typedef struct RegionStruct { - mmu_region_size_Type rg_t; - mmu_memory_Type mem_t; 
- uint8_t domain; - mmu_cacheability_Type inner_norm_t; - mmu_cacheability_Type outer_norm_t; - mmu_ecc_check_Type e_t; - mmu_execute_Type xn_t; - mmu_global_Type g_t; - mmu_secure_Type sec_t; - mmu_access_Type priv_t; - mmu_access_Type user_t; - mmu_shared_Type sh_t; - -} mmu_region_attributes_Type; - -//Following macros define the descriptors and attributes -//Sect_Normal. Outer & inner wb/wa, non-shareable, executable, rw, domain 0 -#define section_normal(descriptor_l1, region) region.rg_t = SECTION; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = WB_WA; \ - region.outer_norm_t = WB_WA; \ - region.mem_t = NORMAL; \ - region.sec_t = SECURE; \ - region.xn_t = EXECUTE; \ - region.priv_t = RW; \ - region.user_t = RW; \ - region.sh_t = NON_SHARED; \ - MMU_GetSectionDescriptor(&descriptor_l1, region); - -//Sect_Normal_NC. Outer & inner non-cacheable, non-shareable, executable, rw, domain 0 -#define section_normal_nc(descriptor_l1, region) region.rg_t = SECTION; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = NON_CACHEABLE; \ - region.outer_norm_t = NON_CACHEABLE; \ - region.mem_t = NORMAL; \ - region.sec_t = SECURE; \ - region.xn_t = EXECUTE; \ - region.priv_t = RW; \ - region.user_t = RW; \ - region.sh_t = NON_SHARED; \ - MMU_GetSectionDescriptor(&descriptor_l1, region); - -//Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0 -#define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = WB_WA; \ - region.outer_norm_t = WB_WA; \ - region.mem_t = NORMAL; \ - region.sec_t = SECURE; \ - region.xn_t = EXECUTE; \ - region.priv_t = READ; \ - region.user_t = READ; \ - region.sh_t = NON_SHARED; \ - MMU_GetSectionDescriptor(&descriptor_l1, region); - -//Sect_Normal_RO. Sect_Normal_Cod, but not executable -#define section_normal_ro(descriptor_l1, region) region.rg_t = SECTION; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = WB_WA; \ - region.outer_norm_t = WB_WA; \ - region.mem_t = NORMAL; \ - region.sec_t = SECURE; \ - region.xn_t = NON_EXECUTE; \ - region.priv_t = READ; \ - region.user_t = READ; \ - region.sh_t = NON_SHARED; \ - MMU_GetSectionDescriptor(&descriptor_l1, region); - -//Sect_Normal_RW. Sect_Normal_Cod, but writeable and not executable -#define section_normal_rw(descriptor_l1, region) region.rg_t = SECTION; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = WB_WA; \ - region.outer_norm_t = WB_WA; \ - region.mem_t = NORMAL; \ - region.sec_t = SECURE; \ - region.xn_t = NON_EXECUTE; \ - region.priv_t = RW; \ - region.user_t = RW; \ - region.sh_t = NON_SHARED; \ - MMU_GetSectionDescriptor(&descriptor_l1, region); -//Sect_SO. Strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0 -#define section_so(descriptor_l1, region) region.rg_t = SECTION; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = NON_CACHEABLE; \ - region.outer_norm_t = NON_CACHEABLE; \ - region.mem_t = STRONGLY_ORDERED; \ - region.sec_t = SECURE; \ - region.xn_t = NON_EXECUTE; \ - region.priv_t = RW; \ - region.user_t = RW; \ - region.sh_t = NON_SHARED; \ - MMU_GetSectionDescriptor(&descriptor_l1, region); - -//Sect_Device_RO. 
Device, non-shareable, non-executable, ro, domain 0, base addr 0 -#define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = NON_CACHEABLE; \ - region.outer_norm_t = NON_CACHEABLE; \ - region.mem_t = STRONGLY_ORDERED; \ - region.sec_t = SECURE; \ - region.xn_t = NON_EXECUTE; \ - region.priv_t = READ; \ - region.user_t = READ; \ - region.sh_t = NON_SHARED; \ - MMU_GetSectionDescriptor(&descriptor_l1, region); - -//Sect_Device_RW. Sect_Device_RO, but writeable -#define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = NON_CACHEABLE; \ - region.outer_norm_t = NON_CACHEABLE; \ - region.mem_t = STRONGLY_ORDERED; \ - region.sec_t = SECURE; \ - region.xn_t = NON_EXECUTE; \ - region.priv_t = RW; \ - region.user_t = RW; \ - region.sh_t = NON_SHARED; \ - MMU_GetSectionDescriptor(&descriptor_l1, region); -//Page_4k_Device_RW. Shared device, not executable, rw, domain 0 -#define page4k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = NON_CACHEABLE; \ - region.outer_norm_t = NON_CACHEABLE; \ - region.mem_t = SHARED_DEVICE; \ - region.sec_t = SECURE; \ - region.xn_t = NON_EXECUTE; \ - region.priv_t = RW; \ - region.user_t = RW; \ - region.sh_t = NON_SHARED; \ - MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region); - -//Page_64k_Device_RW. Shared device, not executable, rw, domain 0 -#define page64k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_64k; \ - region.domain = 0x0; \ - region.e_t = ECC_DISABLED; \ - region.g_t = GLOBAL; \ - region.inner_norm_t = NON_CACHEABLE; \ - region.outer_norm_t = NON_CACHEABLE; \ - region.mem_t = SHARED_DEVICE; \ - region.sec_t = SECURE; \ - region.xn_t = NON_EXECUTE; \ - region.priv_t = RW; \ - region.user_t = RW; \ - region.sh_t = NON_SHARED; \ - MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region); - -/** \brief Set section execution-never attribute - - \param [out] descriptor_l1 L1 descriptor. - \param [in] xn Section execution-never attribute : EXECUTE , NON_EXECUTE. - - \return 0 -*/ -__STATIC_INLINE int MMU_XNSection(uint32_t *descriptor_l1, mmu_execute_Type xn) -{ - *descriptor_l1 &= SECTION_XN_MASK; - *descriptor_l1 |= ((xn & 0x1) << SECTION_XN_SHIFT); - return 0; -} - -/** \brief Set section domain - - \param [out] descriptor_l1 L1 descriptor. - \param [in] domain Section domain - - \return 0 -*/ -__STATIC_INLINE int MMU_DomainSection(uint32_t *descriptor_l1, uint8_t domain) -{ - *descriptor_l1 &= SECTION_DOMAIN_MASK; - *descriptor_l1 |= ((domain & 0xF) << SECTION_DOMAIN_SHIFT); - return 0; -} - -/** \brief Set section parity check - - \param [out] descriptor_l1 L1 descriptor. - \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED - - \return 0 -*/ -__STATIC_INLINE int MMU_PSection(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit) -{ - *descriptor_l1 &= SECTION_P_MASK; - *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT); - return 0; -} - -/** \brief Set section access privileges - - \param [out] descriptor_l1 L1 descriptor. 
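The section_* helper macros above fill an mmu_region_attributes_Type and hand it to MMU_GetSectionDescriptor() (defined further down in this file). A minimal sketch of building one L1 descriptor for ordinary cacheable RAM; Sect_Normal is just an illustrative variable name:

static uint32_t Sect_Normal;                 /* L1 descriptor for normal WB/WA memory */

void MMU_BuildDescriptors(void)
{
  mmu_region_attributes_Type region;

  /* expands to the attribute assignments shown above plus a MMU_GetSectionDescriptor() call */
  section_normal(Sect_Normal, region);
}
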
- \param [in] user User Level Access: NO_ACCESS, RW, READ - \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ - \param [in] afe Access flag enable - - \return 0 -*/ -__STATIC_INLINE int MMU_APSection(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe) -{ - uint32_t ap = 0; - - if (afe == 0) { //full access - if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; } - else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } - else if ((priv == RW) && (user == READ)) { ap = 0x2; } - else if ((priv == RW) && (user == RW)) { ap = 0x3; } - else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } - else if ((priv == READ) && (user == READ)) { ap = 0x7; } - } - - else { //Simplified access - if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } - else if ((priv == RW) && (user == RW)) { ap = 0x3; } - else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } - else if ((priv == READ) && (user == READ)) { ap = 0x7; } - } - - *descriptor_l1 &= SECTION_AP_MASK; - *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT; - *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT; - - return 0; -} - -/** \brief Set section shareability - - \param [out] descriptor_l1 L1 descriptor. - \param [in] s_bit Section shareability: NON_SHARED, SHARED - - \return 0 -*/ -__STATIC_INLINE int MMU_SharedSection(uint32_t *descriptor_l1, mmu_shared_Type s_bit) -{ - *descriptor_l1 &= SECTION_S_MASK; - *descriptor_l1 |= ((s_bit & 0x1) << SECTION_S_SHIFT); - return 0; -} - -/** \brief Set section Global attribute - - \param [out] descriptor_l1 L1 descriptor. - \param [in] g_bit Section attribute: GLOBAL, NON_GLOBAL - - \return 0 -*/ -__STATIC_INLINE int MMU_GlobalSection(uint32_t *descriptor_l1, mmu_global_Type g_bit) -{ - *descriptor_l1 &= SECTION_NG_MASK; - *descriptor_l1 |= ((g_bit & 0x1) << SECTION_NG_SHIFT); - return 0; -} - -/** \brief Set section Security attribute - - \param [out] descriptor_l1 L1 descriptor. - \param [in] s_bit Section Security attribute: SECURE, NON_SECURE - - \return 0 -*/ -__STATIC_INLINE int MMU_SecureSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit) -{ - *descriptor_l1 &= SECTION_NS_MASK; - *descriptor_l1 |= ((s_bit & 0x1) << SECTION_NS_SHIFT); - return 0; -} - -/* Page 4k or 64k */ -/** \brief Set 4k/64k page execution-never attribute - - \param [out] descriptor_l2 L2 descriptor. - \param [in] xn Page execution-never attribute : EXECUTE , NON_EXECUTE. - \param [in] page Page size: PAGE_4k, PAGE_64k, - - \return 0 -*/ -__STATIC_INLINE int MMU_XNPage(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page) -{ - if (page == PAGE_4k) - { - *descriptor_l2 &= PAGE_XN_4K_MASK; - *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_4K_SHIFT); - } - else - { - *descriptor_l2 &= PAGE_XN_64K_MASK; - *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_64K_SHIFT); - } - return 0; -} - -/** \brief Set 4k/64k page domain - - \param [out] descriptor_l1 L1 descriptor. - \param [in] domain Page domain - - \return 0 -*/ -__STATIC_INLINE int MMU_DomainPage(uint32_t *descriptor_l1, uint8_t domain) -{ - *descriptor_l1 &= PAGE_DOMAIN_MASK; - *descriptor_l1 |= ((domain & 0xf) << PAGE_DOMAIN_SHIFT); - return 0; -} - -/** \brief Set 4k/64k page parity check - - \param [out] descriptor_l1 L1 descriptor. 
- \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED - - \return 0 -*/ -__STATIC_INLINE int MMU_PPage(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit) -{ - *descriptor_l1 &= SECTION_P_MASK; - *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT); - return 0; -} - -/** \brief Set 4k/64k page access privileges - - \param [out] descriptor_l2 L2 descriptor. - \param [in] user User Level Access: NO_ACCESS, RW, READ - \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ - \param [in] afe Access flag enable - - \return 0 -*/ -__STATIC_INLINE int MMU_APPage(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe) -{ - uint32_t ap = 0; - - if (afe == 0) { //full access - if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; } - else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } - else if ((priv == RW) && (user == READ)) { ap = 0x2; } - else if ((priv == RW) && (user == RW)) { ap = 0x3; } - else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } - else if ((priv == READ) && (user == READ)) { ap = 0x6; } - } - - else { //Simplified access - if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } - else if ((priv == RW) && (user == RW)) { ap = 0x3; } - else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } - else if ((priv == READ) && (user == READ)) { ap = 0x7; } - } - - *descriptor_l2 &= PAGE_AP_MASK; - *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT; - *descriptor_l2 |= ((ap & 0x4)>>2) << PAGE_AP2_SHIFT; - - return 0; -} - -/** \brief Set 4k/64k page shareability - - \param [out] descriptor_l2 L2 descriptor. - \param [in] s_bit 4k/64k page shareability: NON_SHARED, SHARED - - \return 0 -*/ -__STATIC_INLINE int MMU_SharedPage(uint32_t *descriptor_l2, mmu_shared_Type s_bit) -{ - *descriptor_l2 &= PAGE_S_MASK; - *descriptor_l2 |= ((s_bit & 0x1) << PAGE_S_SHIFT); - return 0; -} - -/** \brief Set 4k/64k page Global attribute - - \param [out] descriptor_l2 L2 descriptor. - \param [in] g_bit 4k/64k page attribute: GLOBAL, NON_GLOBAL - - \return 0 -*/ -__STATIC_INLINE int MMU_GlobalPage(uint32_t *descriptor_l2, mmu_global_Type g_bit) -{ - *descriptor_l2 &= PAGE_NG_MASK; - *descriptor_l2 |= ((g_bit & 0x1) << PAGE_NG_SHIFT); - return 0; -} - -/** \brief Set 4k/64k page Security attribute - - \param [out] descriptor_l1 L1 descriptor. - \param [in] s_bit 4k/64k page Security attribute: SECURE, NON_SECURE - - \return 0 -*/ -__STATIC_INLINE int MMU_SecurePage(uint32_t *descriptor_l1, mmu_secure_Type s_bit) -{ - *descriptor_l1 &= PAGE_NS_MASK; - *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT); - return 0; -} - -/** \brief Set Section memory attributes - - \param [out] descriptor_l1 L1 descriptor. 
- \param [in] mem Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED - \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA, - \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA, - - \return 0 -*/ -__STATIC_INLINE int MMU_MemorySection(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner) -{ - *descriptor_l1 &= SECTION_TEXCB_MASK; - - if (STRONGLY_ORDERED == mem) - { - return 0; - } - else if (SHARED_DEVICE == mem) - { - *descriptor_l1 |= (1 << SECTION_B_SHIFT); - } - else if (NON_SHARED_DEVICE == mem) - { - *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT); - } - else if (NORMAL == mem) - { - *descriptor_l1 |= 1 << SECTION_TEX2_SHIFT; - switch(inner) - { - case NON_CACHEABLE: - break; - case WB_WA: - *descriptor_l1 |= (1 << SECTION_B_SHIFT); - break; - case WT: - *descriptor_l1 |= 1 << SECTION_C_SHIFT; - break; - case WB_NO_WA: - *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT); - break; - } - switch(outer) - { - case NON_CACHEABLE: - break; - case WB_WA: - *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT); - break; - case WT: - *descriptor_l1 |= 1 << SECTION_TEX1_SHIFT; - break; - case WB_NO_WA: - *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX0_SHIFT); - break; - } - } - return 0; -} - -/** \brief Set 4k/64k page memory attributes - - \param [out] descriptor_l2 L2 descriptor. - \param [in] mem 4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED - \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA, - \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA, - \param [in] page Page size - - \return 0 -*/ -__STATIC_INLINE int MMU_MemoryPage(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page) -{ - *descriptor_l2 &= PAGE_4K_TEXCB_MASK; - - if (page == PAGE_64k) - { - //same as section - MMU_MemorySection(descriptor_l2, mem, outer, inner); - } - else - { - if (STRONGLY_ORDERED == mem) - { - return 0; - } - else if (SHARED_DEVICE == mem) - { - *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT); - } - else if (NON_SHARED_DEVICE == mem) - { - *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT); - } - else if (NORMAL == mem) - { - *descriptor_l2 |= 1 << PAGE_4K_TEX2_SHIFT; - switch(inner) - { - case NON_CACHEABLE: - break; - case WB_WA: - *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT); - break; - case WT: - *descriptor_l2 |= 1 << PAGE_4K_C_SHIFT; - break; - case WB_NO_WA: - *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT); - break; - } - switch(outer) - { - case NON_CACHEABLE: - break; - case WB_WA: - *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT); - break; - case WT: - *descriptor_l2 |= 1 << PAGE_4K_TEX1_SHIFT; - break; - case WB_NO_WA: - *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX0_SHIFT); - break; - } - } - } - - return 0; -} - -/** \brief Create a L1 section descriptor - - \param [out] descriptor L1 descriptor - \param [in] reg Section attributes - - \return 0 -*/ -__STATIC_INLINE int MMU_GetSectionDescriptor(uint32_t *descriptor, mmu_region_attributes_Type reg) -{ - *descriptor = 0; - - MMU_MemorySection(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t); - MMU_XNSection(descriptor,reg.xn_t); - MMU_DomainSection(descriptor, reg.domain); - MMU_PSection(descriptor, reg.e_t); - MMU_APSection(descriptor, reg.user_t, reg.priv_t, 1); - MMU_SharedSection(descriptor,reg.sh_t); - 
MMU_GlobalSection(descriptor,reg.g_t); - MMU_SecureSection(descriptor,reg.sec_t); - *descriptor &= SECTION_MASK; - *descriptor |= SECTION_DESCRIPTOR; - - return 0; -} - - -/** \brief Create a L1 and L2 4k/64k page descriptor - - \param [out] descriptor L1 descriptor - \param [out] descriptor2 L2 descriptor - \param [in] reg 4k/64k page attributes - - \return 0 -*/ -__STATIC_INLINE int MMU_GetPageDescriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg) -{ - *descriptor = 0; - *descriptor2 = 0; - - switch (reg.rg_t) - { - case PAGE_4k: - MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k); - MMU_XNPage(descriptor2, reg.xn_t, PAGE_4k); - MMU_DomainPage(descriptor, reg.domain); - MMU_PPage(descriptor, reg.e_t); - MMU_APPage(descriptor2, reg.user_t, reg.priv_t, 1); - MMU_SharedPage(descriptor2,reg.sh_t); - MMU_GlobalPage(descriptor2,reg.g_t); - MMU_SecurePage(descriptor,reg.sec_t); - *descriptor &= PAGE_L1_MASK; - *descriptor |= PAGE_L1_DESCRIPTOR; - *descriptor2 &= PAGE_L2_4K_MASK; - *descriptor2 |= PAGE_L2_4K_DESC; - break; - - case PAGE_64k: - MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k); - MMU_XNPage(descriptor2, reg.xn_t, PAGE_64k); - MMU_DomainPage(descriptor, reg.domain); - MMU_PPage(descriptor, reg.e_t); - MMU_APPage(descriptor2, reg.user_t, reg.priv_t, 1); - MMU_SharedPage(descriptor2,reg.sh_t); - MMU_GlobalPage(descriptor2,reg.g_t); - MMU_SecurePage(descriptor,reg.sec_t); - *descriptor &= PAGE_L1_MASK; - *descriptor |= PAGE_L1_DESCRIPTOR; - *descriptor2 &= PAGE_L2_64K_MASK; - *descriptor2 |= PAGE_L2_64K_DESC; - break; - - case SECTION: - //error - break; - } - - return 0; -} - -/** \brief Create a 1MB Section - - \param [in] ttb Translation table base address - \param [in] base_address Section base address - \param [in] count Number of sections to create - \param [in] descriptor_l1 L1 descriptor (region attributes) - -*/ -__STATIC_INLINE void MMU_TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1) -{ - uint32_t offset; - uint32_t entry; - uint32_t i; - - offset = base_address >> 20; - entry = (base_address & 0xFFF00000) | descriptor_l1; - - //4 bytes aligned - ttb = ttb + offset; - - for (i = 0; i < count; i++ ) - { - //4 bytes aligned - *ttb++ = entry; - entry += OFFSET_1M; - } -} - -/** \brief Create a 4k page entry - - \param [in] ttb L1 table base address - \param [in] base_address 4k base address - \param [in] count Number of 4k pages to create - \param [in] descriptor_l1 L1 descriptor (region attributes) - \param [in] ttb_l2 L2 table base address - \param [in] descriptor_l2 L2 descriptor (region attributes) - -*/ -__STATIC_INLINE void MMU_TTPage4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 ) -{ - - uint32_t offset, offset2; - uint32_t entry, entry2; - uint32_t i; - - offset = base_address >> 20; - entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1; - - //4 bytes aligned - ttb += offset; - //create l1_entry - *ttb = entry; - - offset2 = (base_address & 0xff000) >> 12; - ttb_l2 += offset2; - entry2 = (base_address & 0xFFFFF000) | descriptor_l2; - for (i = 0; i < count; i++ ) - { - //4 bytes aligned - *ttb_l2++ = entry2; - entry2 += OFFSET_4K; - } -} - -/** \brief Create a 64k page entry - - \param [in] ttb L1 table base address - \param [in] base_address 64k base address - \param [in] count Number of 64k pages to create - \param [in] descriptor_l1 L1 descriptor (region 
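MMU_TTSection() above fills one level-1 entry per 1 MB of address space, so a flat map of the full 4 GB space needs 4096 entries. A minimal sketch, assuming a 16 KB-aligned table ttb[], a hypothetical 512 MB RAM window at address 0, and that TTBR0/DACR are programmed elsewhere (for example via the CP15 accessors) before the MMU is switched on with MMU_Enable(), defined just below:

static uint32_t ttb[4096] __ALIGNED(16384);             /* level-1 table: one entry per MB */

void MMU_CreateTranslationTable(void)
{
  mmu_region_attributes_Type region;
  uint32_t Sect_Normal, Sect_SO;

  section_normal(Sect_Normal, region);
  section_so(Sect_SO, region);

  MMU_TTSection(ttb, 0x00000000U, 4096U, Sect_SO);      /* default: strongly-ordered        */
  MMU_TTSection(ttb, 0x00000000U, 512U,  Sect_Normal);  /* hypothetical RAM, first 512 MB   */

  MMU_InvalidateTLB();
  /* program TTBR0/DACR here, then call MMU_Enable() */
}
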
attributes) - \param [in] ttb_l2 L2 table base address - \param [in] descriptor_l2 L2 descriptor (region attributes) - -*/ -__STATIC_INLINE void MMU_TTPage64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 ) -{ - uint32_t offset, offset2; - uint32_t entry, entry2; - uint32_t i,j; - - - offset = base_address >> 20; - entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1; - - //4 bytes aligned - ttb += offset; - //create l1_entry - *ttb = entry; - - offset2 = (base_address & 0xff000) >> 12; - ttb_l2 += offset2; - entry2 = (base_address & 0xFFFF0000) | descriptor_l2; - for (i = 0; i < count; i++ ) - { - //create 16 entries - for (j = 0; j < 16; j++) - { - //4 bytes aligned - *ttb_l2++ = entry2; - } - entry2 += OFFSET_64K; - } -} - -/** \brief Enable MMU -*/ -__STATIC_INLINE void MMU_Enable(void) -{ - // Set M bit 0 to enable the MMU - // Set AFE bit to enable simplified access permissions model - // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking - __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29)); - __ISB(); -} - -/** \brief Disable MMU -*/ -__STATIC_INLINE void MMU_Disable(void) -{ - // Clear M bit 0 to disable the MMU - __set_SCTLR( __get_SCTLR() & ~1); - __ISB(); -} - -/** \brief Invalidate entire unified TLB -*/ - -__STATIC_INLINE void MMU_InvalidateTLB(void) -{ - __set_TLBIALL(0); - __DSB(); //ensure completion of the invalidation - __ISB(); //ensure instruction fetch path sees new state -} - - -#ifdef __cplusplus -} -#endif +#include "./a-profile/armv7a.h" -#endif /* __CORE_CA_H_DEPENDANT */ -#endif /* __CMSIS_GENERIC */ +#endif /* __CORE_CA_H */ \ No newline at end of file diff --git a/CMSIS/Core/Include/core_ca35.h b/CMSIS/Core/Include/core_ca35.h new file mode 100644 index 000000000..7cceb4f22 --- /dev/null +++ b/CMSIS/Core/Include/core_ca35.h @@ -0,0 +1,41 @@ +/**************************************************************************//** + * @file core_ca35.h + * @brief CMSIS Cortex-A57 Core Peripheral Access Layer Header File + * @version V1.0.0 + * @date 2. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef __CORE_CA35_H +#define __CORE_CA35_H + +#define __CORTEX_A 35U /*!< \brief Cortex-A35 Core */ + +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CA35_REV + #define __CA35_REV 0x0000U + #warning "__CA35_REV not defined in device header file; using default!" 
+ #endif +#endif + +#include "./a-profile/armv8a.h" + + +#endif /* __CORE_CA35_H */ \ No newline at end of file diff --git a/CMSIS/Core/Include/core_ca5.h b/CMSIS/Core/Include/core_ca5.h new file mode 100644 index 000000000..47c928115 --- /dev/null +++ b/CMSIS/Core/Include/core_ca5.h @@ -0,0 +1,41 @@ +/**************************************************************************//** + * @file core_ca5.h + * @brief CMSIS Cortex-A5 Core Peripheral Access Layer Header File + * @version V1.0.0 + * @date 2. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef __CORE_CA5_H +#define __CORE_CA5_H + +#define __CORTEX_A 5U /*!< \brief Cortex-A5 Core */ + +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CA5_REV + #define __CA5_REV 0x0000U + #warning "__CA5_REV not defined in device header file; using default!" + #endif +#endif + +#include "./a-profile/armv7a.h" + + +#endif /* __CORE_CA5_H */ \ No newline at end of file diff --git a/CMSIS/Core/Include/core_ca53.h b/CMSIS/Core/Include/core_ca53.h new file mode 100644 index 000000000..58a2fc195 --- /dev/null +++ b/CMSIS/Core/Include/core_ca53.h @@ -0,0 +1,41 @@ +/**************************************************************************//** + * @file core_ca53.h + * @brief CMSIS Cortex-A53 Core Peripheral Access Layer Header File + * @version V1.0.0 + * @date 2. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef __CORE_CA53_H +#define __CORE_CA53_H + +#define __CORTEX_A 53U /*!< \brief Cortex-A53 Core */ + +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CA53_REV + #define __CA53_REV 0x0000U + #warning "__CA53_REV not defined in device header file; using default!" 
+ #endif
+#endif
+
+#include "./a-profile/armv8a.h"
+
+
+#endif /* __CORE_CA53_H */
\ No newline at end of file
diff --git a/CMSIS/Core/Include/core_ca55.h b/CMSIS/Core/Include/core_ca55.h
new file mode 100644
index 000000000..6a3e73840
--- /dev/null
+++ b/CMSIS/Core/Include/core_ca55.h
@@ -0,0 +1,41 @@
+/**************************************************************************//**
+ * @file core_ca55.h
+ * @brief CMSIS Cortex-A55 Core Peripheral Access Layer Header File
+ * @version V1.0.0
+ * @date 2. July 2023
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __CORE_CA55_H
+#define __CORE_CA55_H
+
+#define __CORTEX_A 55U /*!< \brief Cortex-A55 Core */
+
+#if defined __CHECK_DEVICE_DEFINES
+ #ifndef __CA55_REV
+ #define __CA55_REV 0x0000U
+ #warning "__CA55_REV not defined in device header file; using default!"
+ #endif
+#endif
+
+#include "./a-profile/armv8a.h"
+
+
+#endif /* __CORE_CA55_H */
\ No newline at end of file
diff --git a/CMSIS/Core/Include/core_ca57.h b/CMSIS/Core/Include/core_ca57.h
new file mode 100644
index 000000000..9ed92f40e
--- /dev/null
+++ b/CMSIS/Core/Include/core_ca57.h
@@ -0,0 +1,41 @@
+/**************************************************************************//**
+ * @file core_ca57.h
+ * @brief CMSIS Cortex-A57 Core Peripheral Access Layer Header File
+ * @version V1.0.0
+ * @date 2. July 2023
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __CORE_CA57_H
+#define __CORE_CA57_H
+
+#define __CORTEX_A 57U /*!< \brief Cortex-A57 Core */
+
+#if defined __CHECK_DEVICE_DEFINES
+ #ifndef __CA57_REV
+ #define __CA57_REV 0x0000U
+ #warning "__CA57_REV not defined in device header file; using default!"
+ #endif
+#endif
+
+#include "./a-profile/armv8a.h"
+
+
+#endif /* __CORE_CA57_H */
\ No newline at end of file
diff --git a/CMSIS/Core/Include/core_ca7.h b/CMSIS/Core/Include/core_ca7.h
new file mode 100644
index 000000000..b7c7f65b5
--- /dev/null
+++ b/CMSIS/Core/Include/core_ca7.h
@@ -0,0 +1,41 @@
+/**************************************************************************//**
+ * @file core_ca7.h
+ * @brief CMSIS Cortex-A7 Core Peripheral Access Layer Header File
+ * @version V1.0.0
+ * @date 2. July 2023
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __CORE_CA7_H
+#define __CORE_CA7_H
+
+#define __CORTEX_A 7U /*!< \brief Cortex-A7 Core */
+
+#if defined __CHECK_DEVICE_DEFINES
+ #ifndef __CA7_REV
+ #define __CA7_REV 0x0000U
+ #warning "__CA7_REV not defined in device header file; using default!"
+ #endif
+#endif
+
+#include "./a-profile/armv7a.h"
+
+
+#endif /* __CORE_CA7_H */
\ No newline at end of file
diff --git a/CMSIS/Core/Include/core_ca9.h b/CMSIS/Core/Include/core_ca9.h
new file mode 100644
index 000000000..7a72a80b3
--- /dev/null
+++ b/CMSIS/Core/Include/core_ca9.h
@@ -0,0 +1,41 @@
+/**************************************************************************//**
+ * @file core_ca9.h
+ * @brief CMSIS Cortex-A9 Core Peripheral Access Layer Header File
+ * @version V1.0.0
+ * @date 2. July 2023
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __CORE_CA9_H
+#define __CORE_CA9_H
+
+#define __CORTEX_A 9U /*!< \brief Cortex-A9 Core */
+
+#if defined __CHECK_DEVICE_DEFINES
+ #ifndef __CA9_REV
+ #define __CA9_REV 0x0000U
+ #warning "__CA9_REV not defined in device header file; using default!"
+ #endif
+#endif
+
+#include "./a-profile/armv7a.h"
+
+
+#endif /* __CORE_CA9_H */
\ No newline at end of file
diff --git a/CMSIS/Core/Include/core_cr4.h b/CMSIS/Core/Include/core_cr4.h
new file mode 100644
index 000000000..b53b20a88
--- /dev/null
+++ b/CMSIS/Core/Include/core_cr4.h
@@ -0,0 +1,41 @@
+/**************************************************************************//**
+ * @file core_cr4.h
+ * @brief CMSIS Cortex-R4 Core Peripheral Access Layer Header File
+ * @version V1.0.0
+ * @date 2. July 2023
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __CORE_CR4_H
+#define __CORE_CR4_H
+
+#define __CORTEX_R 4U /*!< \brief Cortex-R4 Core */
+
+#if defined __CHECK_DEVICE_DEFINES
+ #ifndef __CR4_REV
+ #define __CR4_REV 0x0000U
+ #warning "__CR4_REV not defined in device header file; using default!"
+ #endif
+#endif
+
+#include "./r-profile/armv7r.h"
+
+
+#endif /* __CORE_CR4_H */
\ No newline at end of file
diff --git a/CMSIS/Core/Include/core_cr5.h b/CMSIS/Core/Include/core_cr5.h
new file mode 100644
index 000000000..9db67a142
--- /dev/null
+++ b/CMSIS/Core/Include/core_cr5.h
@@ -0,0 +1,41 @@
+/**************************************************************************//**
+ * @file core_cr5.h
+ * @brief CMSIS Cortex-R5 Core Peripheral Access Layer Header File
+ * @version V1.0.0
+ * @date 2. July 2023
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __CORE_CR5_H
+#define __CORE_CR5_H
+
+#define __CORTEX_R 5U /*!< \brief Cortex-R5 Core */
+
+#if defined __CHECK_DEVICE_DEFINES
+ #ifndef __CR5_REV
+ #define __CR5_REV 0x0000U
+ #warning "__CR5_REV not defined in device header file; using default!"
+ #endif +#endif + +#include "./r-profile/armv7r.h" + + +#endif /* __CORE_CR5_H */ \ No newline at end of file diff --git a/CMSIS/Core/Include/core_cr52.h b/CMSIS/Core/Include/core_cr52.h new file mode 100644 index 000000000..3d50cf8b8 --- /dev/null +++ b/CMSIS/Core/Include/core_cr52.h @@ -0,0 +1,41 @@ +/**************************************************************************//** + * @file core_cr52.h + * @brief CMSIS Cortex-R52 Core Peripheral Access Layer Header File + * @version V1.0.0 + * @date 2. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef __CORE_CR52_H +#define __CORE_CR52_H + +#define __CORTEX_R 52U /*!< \brief Cortex-R52 Core */ + +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CR52_REV + #define __CR52_REV 0x0000U + #warning "__CR52_REV not defined in device header file; using default!" + #endif +#endif + +#include "./r-profile/armv8r.h" + + +#endif /* __CORE_CR52_H */ \ No newline at end of file diff --git a/CMSIS/Core/Include/core_cr7.h b/CMSIS/Core/Include/core_cr7.h new file mode 100644 index 000000000..d36b546f6 --- /dev/null +++ b/CMSIS/Core/Include/core_cr7.h @@ -0,0 +1,41 @@ +/**************************************************************************//** + * @file core_cr7.h + * @brief CMSIS Cortex-R7 Core Peripheral Access Layer Header File + * @version V1.0.0 + * @date 2. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef __CORE_CR7_H +#define __CORE_CR7_H + +#define __CORTEX_R 7U /*!< \brief Cortex-R7 Core */ + +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CR7_REV + #define __CR7_REV 0x0000U + #warning "__CR7_REV not defined in device header file; using default!" 
+ #endif +#endif + +#include "./r-profile/armv7r.h" + + +#endif /* __CORE_CR7_H */ \ No newline at end of file diff --git a/CMSIS/Core/Include/core_cr8.h b/CMSIS/Core/Include/core_cr8.h new file mode 100644 index 000000000..1ab407bb5 --- /dev/null +++ b/CMSIS/Core/Include/core_cr8.h @@ -0,0 +1,41 @@ +/**************************************************************************//** + * @file core_cr8.h + * @brief CMSIS Cortex-R8 Core Peripheral Access Layer Header File + * @version V1.0.0 + * @date 2. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef __CORE_CR8_H +#define __CORE_CR8_H + +#define __CORTEX_R 8U /*!< \brief Cortex-R8 Core */ + +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CR8_REV + #define __CR8_REV 0x0000U + #warning "__CR8_REV not defined in device header file; using default!" + #endif +#endif + +#include "./r-profile/armv7r.h" + + +#endif /* __CORE_CR8_H */ \ No newline at end of file diff --git a/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h b/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h index 84bd6faf4..9b30234b5 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h @@ -2,7 +2,7 @@ * @file cmsis_armclang_m.h * @brief CMSIS compiler armclang (Arm Compiler 6) header file * @version V6.0.0 - * @date 27. July 2023 + * @date 4. August 2023 ******************************************************************************/ /* * Copyright (c) 2009-2023 Arm Limited. All rights reserved. 
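Each of the per-core headers added above is a thin shim: it pins __CORTEX_A or __CORTEX_R, optionally sanity-checks the revision macro, and includes the shared profile header. A device vendor's header is expected to supply the interrupt numbers and device settings before including the shim. Everything in the following sketch (the device, IRQ numbers and revision value) is hypothetical, and further settings such as peripheral base addresses are omitted:

/* hypothetical device header for a Cortex-R5 based part */
typedef enum IRQn {
  SGI0_IRQn     = 0,                 /* software-generated interrupt 0     */
  Watchdog_IRQn = 32                 /* first shared peripheral interrupt  */
} IRQn_Type;

#define __CR5_REV              0x0100U   /* r1p0, example value                */
#define __CHECK_DEVICE_DEFINES 1U        /* enable the revision-macro check    */

#include "core_cr5.h"                    /* selects r-profile/armv7r.h         */
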
@@ -27,1440 +27,649 @@ #pragma clang system_header /* treat file as system include file */ -#if (__ARM_ACLE >= 200) - #include -#else - #error Compiler must support ACLE V2.0 -#endif /* (__ARM_ACLE >= 200) */ - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE __inline -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static __inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_UNION - #define __PACKED_UNION union __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif -#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".bss.noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) +#ifndef __CMSIS_ARMCLANG_H + #error "This file must not be included directly" #endif /* ######################### Startup and Lowlevel Init ######################## */ + #ifndef __PROGRAM_START -#define __PROGRAM_START __main + #define __PROGRAM_START __main #endif #ifndef __INITIAL_SP -#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit + #define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit #endif #ifndef __STACK_LIMIT -#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base + #define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base #endif #ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __Vectors + #define __VECTOR_TABLE __Vectors #endif #ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) -#endif - -#if (__ARM_FEATURE_CMSE == 3) -#ifndef __STACK_SEAL -#define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base -#endif - -#ifndef __TZ_STACK_SEAL_SIZE -#define 
__TZ_STACK_SEAL_SIZE 8U -#endif - -#ifndef __TZ_STACK_SEAL_VALUE -#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL -#endif - - -__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { - *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; -} + #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) #endif - -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ - -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif - -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __nop() - - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __wfi() - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __wfe() - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __sev() - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -#define __ISB() __isb(0xF) - - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -#define __DSB() __dsb(0xF) - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -#define __DMB() __dmb(0xF) - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV(value) __rev(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV16(value) __rev16(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REVSH(value) __revsh(value) - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. 
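The instruction wrappers removed from this file keep their familiar names; the sketch below assumes the same macros remain available to application code after the restructuring. It shows typical uses of the byte-reverse and barrier wrappers; be32_to_cpu and idle_until_irq are illustrative names.

#include <stdint.h>

uint32_t be32_to_cpu(uint32_t big_endian_word)
{
  return __REV(big_endian_word);   /* byte-reverse: 0x12345678 becomes 0x78563412 */
}

void idle_until_irq(void)
{
  __DSB();                         /* complete outstanding memory accesses   */
  __WFI();                         /* sleep until an interrupt is pending    */
  __ISB();                         /* resynchronise the pipeline on wake-up  */
}
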
- \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -#define __ROR(op1, op2) __ror(op1, op2) - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -#define __RBIT(value) __rbit(value) - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -#define __CLZ(value) __clz(value) - - -#if ((__ARM_FEATURE_SAT >= 1) && \ - (__ARM_ARCH_ISA_THUMB >= 2) ) -/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(value, sat) __ssat(value, sat) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(value, sat) __usat(value, sat) - -#else /* (__ARM_FEATURE_SAT >= 1) */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) -{ - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return (max); - } - else if (val < min) - { - return (min); - } - } - return (val); -} - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return (max); - } - else if (val < 0) - { - return (0U); - } - } - return ((uint32_t)val); -} -#endif /* (__ARM_FEATURE_SAT >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 1) -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -#define __CLREX __builtin_arm_clrex - - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXB (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 2) -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. 
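
/*
 * Illustrative usage sketch (not part of this patch): atomically increment a
 * shared counter with the exclusive-access intrinsics referenced in the hunk
 * above. Assumes a device header that pulls in this compiler header chain,
 * e.g. the CMSIS reference device ARMCM33; 'g_counter' is a hypothetical
 * application variable.
 */
#include "ARMCM33.h"

static volatile uint32_t g_counter;

static void counter_increment(void)
{
  uint32_t val;
  do {
    val = __LDREXW(&g_counter);                   /* exclusive read                  */
  } while (__STREXW(val + 1U, &g_counter) != 0U); /* retry if exclusivity was lost   */
}
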
- \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXH (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 2) */ - - -#if (__ARM_FEATURE_LDREX >= 4) -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXW (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 4) */ - - -#if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - - -#if (__ARM_ARCH >= 8) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - - -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXB (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEX (uint32_t)__builtin_arm_stlex - -#endif /* (__ARM_ARCH >= 8) */ - -/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ - - -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -#ifndef __ARM_COMPAT_H -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} -#endif - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -#ifndef __ARM_COMPAT_H -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} -#endif - - -/** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value - */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); - __ISB(); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); - __ISB(); -} -#endif - - -/** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return (result); -} - - -/** - \brief Get APSR Register - \details Returns the content of the APSR Register. 
- \return APSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return (result); -} - - -/** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return (result); -} - - -/** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif - - -/** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} -#endif - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. 
- \return SP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return (result); -} +#if (__ARM_FEATURE_CMSE == 3) + #ifndef __STACK_SEAL + #define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base + #endif + + #ifndef __TZ_STACK_SEAL_SIZE + #define __TZ_STACK_SEAL_SIZE 8U + #endif + + #ifndef __TZ_STACK_SEAL_VALUE + #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL + #endif + + __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; + } +#endif -/** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) { uint32_t result; - __ASM volatile ("MRS %0, primask" : "=r" (result) ); + __ASM volatile ("MRS %0, control" : "=r" (result) ); return (result); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return (result); + } #endif /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) { - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} + /** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. 
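
/*
 * Illustrative sketch (not part of this patch): seal the secure stack with
 * __TZ_set_STACKSEAL_S from the block above, as a secure startup file might
 * do before branching to main. __STACK_SEAL resolves to the linker symbol
 * configured above; the extern declaration is an assumption of this sketch.
 */
#include "ARMCM33.h"              /* assumed CMSE-capable device header */

extern uint64_t __STACK_SEAL;     /* provided by the scatter file / linker script */

void seal_secure_stack(void)
{
  __TZ_set_STACKSEAL_S((uint32_t *)&__STACK_SEAL);  /* writes __TZ_STACK_SEAL_VALUE */
}
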
+ \param [in] control Control Register value to set + */ + __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) + { + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); + } #endif -#if (__ARM_ARCH_ISA_THUMB >= 2) /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) { - __ASM volatile ("cpsie f" : : : "memory"); -} - + uint32_t result; -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return (result); } /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) { uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); return (result); } -#if (__ARM_FEATURE_CMSE == 3) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) { uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); return (result); } -#endif /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_FORCEINLINE uint32_t __get_PSP(void) { - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return (result); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} + /** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. 
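
/*
 * Illustrative sketch (not part of this patch): move Thread mode onto the
 * Process Stack using __set_PSP together with __set_CONTROL from this file.
 * 'process_stack' and its size are assumptions; CONTROL bit 1 (SPSEL)
 * selects the PSP in Thread mode.
 */
#include "ARMCM33.h"

static uint64_t process_stack[256];                /* 2 KiB stack, 8-byte aligned */

void switch_to_psp(void)
{
  __set_PSP((uint32_t)&process_stack[256]);        /* initial top of stack        */
  __set_CONTROL(__get_CONTROL() | (1UL << 1));     /* CONTROL.SPSEL = 1           */
  /* __set_CONTROL already issues an ISB, so the switch takes effect here.        */
}
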
+ \return PSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return (result); + } #endif /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) { - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); } +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) + { + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); + } +#endif + + /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) { uint32_t result; - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + __ASM volatile ("MRS %0, msp" : "=r" (result) ); return (result); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return (result); + } #endif /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
- \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} + /** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) + { + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); + } #endif -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - -#if (__ARM_ARCH >= 8) -/** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return (result); +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return (result); + } + + + /** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) + { + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); + } #endif -} -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) { -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); return (result); -#endif } -#endif -/** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
- \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return (result); + } #endif -} -#if (__ARM_FEATURE_CMSE == 3) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) { -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); } -#endif - -/** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return (result); +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ + __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) + { + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + } #endif -} -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. +#if (__ARM_ARCH_ISA_THUMB >= 2) + /** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return (result); + } - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. 
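
/*
 * Illustrative sketch (not part of this patch): a save/set/restore critical
 * section built on the PRIMASK accessors above. The increment of
 * 'shared_state' stands in for any non-atomic update and is an assumption
 * of the sketch.
 */
#include "ARMCM33.h"

static volatile uint32_t shared_state;

void update_shared_state(void)
{
  uint32_t primask = __get_PRIMASK();   /* remember current masking state        */
  __set_PRIMASK(1U);                    /* mask configurable interrupts          */
  shared_state++;                       /* guarded read-modify-write             */
  __set_PRIMASK(primask);               /* restore previous state                */
}
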
- \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return (result); -#endif -} -#endif + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return (result); + } + #endif -/** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif -} + /** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) + { + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + } -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) + { + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + } + #endif - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif -#endif /* (__ARM_ARCH >= 8) */ + /** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) + { + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + } + + + /** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. 
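
/*
 * Illustrative sketch (not part of this patch): temporarily raise the
 * interrupt masking level with __set_BASEPRI_MAX and restore it afterwards.
 * The value 0x40 is an assumption and must be an encoded priority, i.e.
 * shifted into the priority bits implemented by the device.
 */
#include "ARMCM33.h"

void do_time_critical_work(void)
{
  uint32_t basepri = __get_BASEPRI();   /* remember current level                 */
  __set_BASEPRI_MAX(0x40U);             /* only ever raises the masking level     */
  /* ... work that must not be preempted by lower-priority interrupts ...         */
  __set_BASEPRI(basepri);               /* restore previous level                 */
}
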
+ \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return (result); + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return (result); + } + #endif -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ -#if (__ARM_FP >= 1) - return (__builtin_arm_get_fpscr()); -#else - return (0U); -#endif -} + /** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + } + #endif +#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ -#if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); -#else - (void)fpscr; -#endif -} +#if (__ARM_ARCH >= 8) + /** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return (result); + #endif + } + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return (result); + #endif + } + #endif + + + /** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. 
Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); + #endif + } -/** @} end of CMSIS_Core_RegAccFunctions */ + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); + #endif + } + #endif + + + /** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return (result); + #endif + } -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (__ARM_FEATURE_DSP == 1) -#define __SADD8 __sadd8 -#define __QADD8 __qadd8 -#define __SHADD8 __shadd8 -#define __UADD8 __uadd8 -#define __UQADD8 __uqadd8 -#define __UHADD8 __uhadd8 -#define __SSUB8 __ssub8 -#define __QSUB8 __qsub8 -#define __SHSUB8 __shsub8 -#define __USUB8 __usub8 -#define __UQSUB8 __uqsub8 -#define __UHSUB8 __uhsub8 -#define __SADD16 __sadd16 -#define __QADD16 __qadd16 -#define __SHADD16 __shadd16 -#define __UADD16 __uadd16 -#define __UQADD16 __uqadd16 -#define __UHADD16 __uhadd16 -#define __SSUB16 __ssub16 -#define __QSUB16 __qsub16 -#define __SHSUB16 __shsub16 -#define __USUB16 __usub16 -#define __UQSUB16 __uqsub16 -#define __UHSUB16 __uhsub16 -#define __SASX __sasx -#define __QASX __qasx -#define __SHASX __shasx -#define __UASX __uasx -#define __UQASX __uqasx -#define __UHASX __uhasx -#define __SSAX __ssax -#define __QSAX __qsax -#define __SHSAX __shsax -#define __USAX __usax -#define __UQSAX __uqsax -#define __UHSAX __uhsax -#define __USAD8 __usad8 -#define __USADA8 __usada8 -#define __SSAT16 __ssat16 -#define __USAT16 __usat16 -#define __UXTB16 __uxtb16 -#define __UXTAB16 __uxtab16 -#define __SXTB16 __sxtb16 -#define __SXTAB16 __sxtab16 -#define __SMUAD __smuad -#define __SMUADX __smuadx -#define 
__SMLAD __smlad -#define __SMLADX __smladx -#define __SMLALD __smlald -#define __SMLALDX __smlaldx -#define __SMUSD __smusd -#define __SMUSDX __smusdx -#define __SMLSD __smlsd -#define __SMLSDX __smlsdx -#define __SMLSLD __smlsld -#define __SMLSLDX __smlsldx -#define __SEL __sel -#define __QADD __qadd -#define __QSUB __qsub - -#define __PKHBT(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __PKHTB(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - if (ARG3 == 0) \ - __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ - else \ - __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) - -#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return (result); + #endif + } + #endif + + + /** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); + #endif + } - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return (result); -} -#endif /* (__ARM_FEATURE_DSP == 1) */ -/** @} end of group CMSIS_SIMD_intrinsics */ + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
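
/*
 * Illustrative sketch (not part of this patch): place a limit under a process
 * stack with __set_PSPLIM before switching to it. On devices without the Main
 * Extension the write is silently ignored, as documented above. 'task_stack'
 * and its size are assumptions of the sketch.
 */
#include "ARMCM33.h"

static uint64_t task_stack[128];              /* 1 KiB task stack */

void prepare_task_stack(void)
{
  __set_PSPLIM((uint32_t)&task_stack[0]);     /* lowest legal stack address */
  __set_PSP((uint32_t)&task_stack[128]);      /* initial top of stack       */
}
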
+ \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); + #endif + } + #endif +#endif /* (__ARM_ARCH >= 8) */ +/** @} end of CMSIS_Core_RegAccFunctions */ #endif /* __CMSIS_ARMCLANG_M_H */ diff --git a/CMSIS/Core/Include/m-profile/cmsis_clang_m.h b/CMSIS/Core/Include/m-profile/cmsis_clang_m.h index 48600ee1a..83dca1295 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_clang_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_clang_m.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_clang_m.h * @brief CMSIS compiler LLVM/Clang header file - * @version V1.1.0 - * @date 27. July 2023 + * @version V6.0.0 + * @date 4. August 2023 ******************************************************************************/ /* * Copyright (c) 2009-2023 Arm Limited. All rights reserved. @@ -27,1441 +27,649 @@ #pragma clang system_header /* treat file as system include file */ -#if (__ARM_ACLE >= 200) - #include -#else - #error Compiler must support ACLE V2.0 -#endif /* (__ARM_ACLE >= 200) */ - -/* Fallback for __has_builtin */ -#ifndef __has_builtin - #define __has_builtin(x) (0) -#endif - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE inline -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_UNION - #define __PACKED_UNION union __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) 
__attribute__((aligned(x))) -#endif -#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".bss.noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) +#ifndef __CMSIS_CLANG_H + #error "This file must not be included directly" #endif /* ######################### Startup and Lowlevel Init ######################## */ + #ifndef __PROGRAM_START -#define __PROGRAM_START _start + #define __PROGRAM_START __main #endif #ifndef __INITIAL_SP -#define __INITIAL_SP __stack + #define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit #endif #ifndef __STACK_LIMIT -#define __STACK_LIMIT __stack_limit + #define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base #endif #ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __Vectors + #define __VECTOR_TABLE __Vectors #endif #ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) + #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) #endif -#if (__ARM_FEATURE_CMSE == 3) -#ifndef __STACK_SEAL -#define __STACK_SEAL __stack_seal -#endif - -#ifndef __TZ_STACK_SEAL_SIZE -#define __TZ_STACK_SEAL_SIZE 8U -#endif - -#ifndef __TZ_STACK_SEAL_VALUE -#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL -#endif - - -__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { - *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; -} -#endif - - -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ - -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif - -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __nop() - - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __wfi() - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __wfe() - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __sev() - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -#define __ISB() __isb(0xF) - - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. 
- */ -#define __DSB() __dsb(0xF) - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -#define __DMB() __dmb(0xF) - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV(value) __rev(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV16(value) __rev16(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REVSH(value) __revsh(value) - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -#define __ROR(op1, op2) __ror(op1, op2) - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -#define __RBIT(value) __rbit(value) - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -#define __CLZ(value) __clz(value) - - -#if ((__ARM_FEATURE_SAT >= 1) && \ - (__ARM_ARCH_ISA_THUMB >= 2) ) -/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(value, sat) __ssat(value, sat) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(value, sat) __usat(value, sat) - -#else /* (__ARM_FEATURE_SAT >= 1) */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) -{ - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return (max); - } - else if (val < min) - { - return (min); - } - } - return (val); -} - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. 
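
/*
 * Illustrative sketch (not part of this patch): clamp a 32-bit accumulator to
 * the signed 16-bit range with __SSAT, matching the behaviour of the fallback
 * implementation shown above when the saturating instructions are unavailable.
 */
#include "ARMCM33.h"

static int16_t narrow_sample(int32_t acc)
{
  return (int16_t)__SSAT(acc, 16);   /* saturate to [-32768, 32767] */
}
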
- \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return (max); - } - else if (val < 0) - { - return (0U); - } - } - return ((uint32_t)val); -} -#endif /* (__ARM_FEATURE_SAT >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 1) -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -#define __CLREX __builtin_arm_clrex - - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXB (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 2) -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXH (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 2) */ - - -#if (__ARM_FEATURE_LDREX >= 4) -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXW (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 4) */ - - -#if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - - -#if (__ARM_ARCH >= 8) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. 
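/*
 * Usage sketch (illustrative only): a lock-free counter increment built on the
 * exclusive-access intrinsics above. The counter variable is an assumption of
 * this example.
 */
static volatile uint32_t shared_counter;

static void counter_increment(void)
{
  uint32_t value;
  do {
    value = __LDREXW(&shared_counter);          /* load and set exclusive monitor           */
    value++;
  } while (__STREXW(value, &shared_counter));   /* 1 = lost exclusivity, retry              */
  __DMB();                                      /* order the update against later accesses  */
}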
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - - -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXB (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEX (uint32_t)__builtin_arm_stlex - -#endif /* (__ARM_ARCH >= 8) */ - -/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ - - -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} - - -/** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value - */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. 
- \return non-secure Control Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); - __ISB(); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); - __ISB(); -} -#endif - - -/** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return (result); -} - - -/** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return (result); -} - - -/** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return (result); -} - - -/** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif - - -/** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). 
- \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} -#endif - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return (result); -} +#if (__ARM_FEATURE_CMSE == 3) + #ifndef __STACK_SEAL + #define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base + #endif + + #ifndef __TZ_STACK_SEAL_SIZE + #define __TZ_STACK_SEAL_SIZE 8U + #endif + + #ifndef __TZ_STACK_SEAL_VALUE + #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL + #endif + + __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; + } +#endif -/** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) { uint32_t result; - __ASM volatile ("MRS %0, primask" : "=r" (result) ); + __ASM volatile ("MRS %0, control" : "=r" (result) ); return (result); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
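/*
 * Usage sketch (illustrative only): sealing the secure stack from a secure
 * startup sequence, as the macros and __TZ_set_STACKSEAL_S above are intended
 * to be used. __STACK_SEAL resolves to the linker-provided seal location;
 * calling this before any non-secure code runs is an assumption of the example.
 */
#if (__ARM_FEATURE_CMSE == 3)
extern uint64_t __STACK_SEAL;

static void seal_secure_stack(void)
{
  /* Writes the 8-byte (__TZ_STACK_SEAL_SIZE) seal value at the seal location. */
  __TZ_set_STACKSEAL_S((uint32_t *)&__STACK_SEAL);
}
#endif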
- \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return (result); + } #endif /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) { - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} + /** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ + __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) + { + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); + } #endif -#if (__ARM_ARCH_ISA_THUMB >= 2) /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) { - __ASM volatile ("cpsie f" : : : "memory"); -} - + uint32_t result; -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return (result); } /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) { uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); return (result); } -#if (__ARM_FEATURE_CMSE == 3) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief Get xPSR Register + \details Returns the content of the xPSR Register. 
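/*
 * Usage sketch (illustrative only): selecting the process stack for thread
 * mode via CONTROL.SPSEL (bit 1) using the accessors above. PSP is assumed to
 * have been programmed beforehand; the helper name is an assumption.
 */
static void use_process_stack(void)
{
  /* __set_CONTROL already issues an ISB, so the new stack selection takes
     effect before the next instruction that depends on it. */
  __set_CONTROL(__get_CONTROL() | (1UL << 1));
}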
+ \return xPSR Register value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) { uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); return (result); } -#endif /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_FORCEINLINE uint32_t __get_PSP(void) { - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return (result); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} + /** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return (result); + } #endif /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) { - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); } +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) + { + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); + } +#endif + + /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) { uint32_t result; - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + __ASM volatile ("MRS %0, msp" : "=r" (result) ); return (result); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. 
- \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return (result); + } #endif /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} + /** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) + { + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); + } #endif -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - -#if (__ARM_ARCH >= 8) -/** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return (result); +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return (result); + } + + + /** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) + { + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); + } #endif -} -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. 
- \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) { -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); return (result); -#endif } -#endif -/** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return (result); + } #endif -} -#if (__ARM_FEATURE_CMSE == 3) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) { -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); } -#endif - -/** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). 
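/*
 * Usage sketch (illustrative only): a nestable critical section that saves,
 * sets and restores PRIMASK with the accessors above. The shared variable is
 * an assumption of this example.
 */
static volatile uint32_t shared_state;

static void update_shared_state(uint32_t new_value)
{
  uint32_t primask = __get_PRIMASK();   /* remember the current mask state     */
  __set_PRIMASK(1U);                    /* mask all configurable-priority IRQs */
  shared_state = new_value;             /* protected update                    */
  __set_PRIMASK(primask);               /* restore the previous mask state     */
}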
- \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return (result); +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ + __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) + { + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + } #endif -} -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. +#if (__ARM_ARCH_ISA_THUMB >= 2) + /** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return (result); + } - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return (result); -#endif -} -#endif + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return (result); + } + #endif -/** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif -} + /** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) + { + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + } -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. 
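/*
 * Usage sketch (illustrative only): temporarily masking interrupts of priority
 * value 0x40 and numerically higher (i.e. lower urgency) with the BASEPRI
 * accessors above, while leaving higher-priority interrupts enabled. The
 * threshold and callback are assumptions of this example.
 */
static void run_with_low_prio_masked(void (*work)(void))
{
  uint32_t basepri = __get_BASEPRI();
  __set_BASEPRI(0x40U);   /* mask priority values 0x40..0xFF */
  work();                 /* time-critical section           */
  __set_BASEPRI(basepri); /* restore the previous threshold  */
}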
+ #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) + { + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + } + #endif - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif -#endif /* (__ARM_ARCH >= 8) */ + /** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) + { + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + } + + + /** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return (result); + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return (result); + } + #endif -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ -#if (__ARM_FP >= 1) - return (__builtin_arm_get_fpscr()); -#else - return (0U); -#endif -} + /** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + } + #endif +#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ -#if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); -#else - (void)fpscr; -#endif -} +#if (__ARM_ARCH >= 8) + /** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. 
Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return (result); + #endif + } + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return (result); + #endif + } + #endif + + + /** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); + #endif + } -/** @} end of CMSIS_Core_RegAccFunctions */ + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); + #endif + } + #endif + + + /** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). 
+ \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return (result); + #endif + } -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (__ARM_FEATURE_DSP == 1) -#define __SADD8 __sadd8 -#define __QADD8 __qadd8 -#define __SHADD8 __shadd8 -#define __UADD8 __uadd8 -#define __UQADD8 __uqadd8 -#define __UHADD8 __uhadd8 -#define __SSUB8 __ssub8 -#define __QSUB8 __qsub8 -#define __SHSUB8 __shsub8 -#define __USUB8 __usub8 -#define __UQSUB8 __uqsub8 -#define __UHSUB8 __uhsub8 -#define __SADD16 __sadd16 -#define __QADD16 __qadd16 -#define __SHADD16 __shadd16 -#define __UADD16 __uadd16 -#define __UQADD16 __uqadd16 -#define __UHADD16 __uhadd16 -#define __SSUB16 __ssub16 -#define __QSUB16 __qsub16 -#define __SHSUB16 __shsub16 -#define __USUB16 __usub16 -#define __UQSUB16 __uqsub16 -#define __UHSUB16 __uhsub16 -#define __SASX __sasx -#define __QASX __qasx -#define __SHASX __shasx -#define __UASX __uasx -#define __UQASX __uqasx -#define __UHASX __uhasx -#define __SSAX __ssax -#define __QSAX __qsax -#define __SHSAX __shsax -#define __USAX __usax -#define __UQSAX __uqsax -#define __UHSAX __uhsax -#define __USAD8 __usad8 -#define __USADA8 __usada8 -#define __SSAT16 __ssat16 -#define __USAT16 __usat16 -#define __UXTB16 __uxtb16 -#define __UXTAB16 __uxtab16 -#define __SXTB16 __sxtb16 -#define __SXTAB16 __sxtab16 -#define __SMUAD __smuad -#define __SMUADX __smuadx -#define __SMLAD __smlad -#define __SMLADX __smladx -#define __SMLALD __smlald -#define __SMLALDX __smlaldx -#define __SMUSD __smusd -#define __SMUSDX __smusdx -#define __SMLSD __smlsd -#define __SMLSDX __smlsdx -#define __SMLSLD __smlsld -#define __SMLSLDX __smlsldx -#define __SEL __sel -#define __QADD __qadd -#define __QSUB __qsub - -#define __PKHBT(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __PKHTB(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - if (ARG3 == 0) \ - __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ - else \ - __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) - -#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. 
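/*
 * Usage sketch (illustrative only): programming the process stack and its
 * limit before switching to a task stack on an Armv8-M core, using the
 * accessors above. Names and the stack layout are assumptions of this example.
 */
static void prepare_task_stack(uint32_t *stack_base, uint32_t stack_words)
{
  __set_PSPLIM((uint32_t)stack_base);               /* lowest address PSP may reach  */
  __set_PSP((uint32_t)(stack_base + stack_words));  /* initial top of the task stack */
  /* ... the scheduler would now set CONTROL.SPSEL and start the task ... */
}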
+ \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return (result); + #endif + } + #endif + + + /** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); + #endif + } - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return (result); -} -#endif /* (__ARM_FEATURE_DSP == 1) */ -/** @} end of group CMSIS_SIMD_intrinsics */ + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); + #endif + } + #endif +#endif /* (__ARM_ARCH >= 8) */ +/** @} end of CMSIS_Core_RegAccFunctions */ #endif /* __CMSIS_CLANG_M_H */ diff --git a/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h b/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h index 98125b10e..65cf0e2f4 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h @@ -2,7 +2,7 @@ * @file cmsis_gcc_m.h * @brief CMSIS compiler GCC header file * @version V6.0.0 - * @date 27. July 2023 + * @date 4. August 2023 ******************************************************************************/ /* * Copyright (c) 2009-2023 Arm Limited. All rights reserved. 
@@ -25,7 +25,9 @@ #ifndef __CMSIS_GCC_M_H #define __CMSIS_GCC_M_H -#pragma clang system_header /* treat file as system include file */ +#ifndef __CMSIS_GCC_H + #error "This file must not be included directly" +#endif /* ignore some GCC warnings */ #pragma GCC diagnostic push @@ -33,1625 +35,691 @@ #pragma GCC diagnostic ignored "-Wconversion" #pragma GCC diagnostic ignored "-Wunused-parameter" -//#if (__ARM_ACLE >= 200) - #include -//#else -// #error Compiler must support ACLE V2.0 -//#endif /* (__ARM_ACLE >= 200) */ - -/* Fallback for __has_builtin */ -#ifndef __has_builtin - #define __has_builtin(x) (0) -#endif - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE inline -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_UNION - #define __PACKED_UNION union __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif -#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".bss.noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) -#endif /* ######################### Startup and Lowlevel Init ######################## */ -#ifndef __PROGRAM_START -/** - \brief Initializes data and bss sections - \details This default implementations initialized all data and additional bss - sections relying on .copy.table and .zero.table specified properly - in the used linker script. 
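/*
 * Illustrative sketch only: a minimal GCC reset handler as expected by the
 * startup machinery described here. __PROGRAM_START resolves to __cmsis_start,
 * which processes the .copy.table/.zero.table entries from the linker script
 * before entering the C runtime and main(). SystemInit() is the usual CMSIS
 * system-setup hook and is assumed to be provided by the device.
 */
extern void SystemInit(void);

__NO_RETURN void Reset_Handler(void)
{
  SystemInit();        /* device clock/system configuration      */
  __PROGRAM_START();   /* .data/.bss initialisation, then main() */
}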
- - */ -__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) -{ - extern void _start(void) __NO_RETURN; - - typedef struct __copy_table { - uint32_t const* src; - uint32_t* dest; - uint32_t wlen; - } __copy_table_t; - - typedef struct __zero_table { - uint32_t* dest; - uint32_t wlen; - } __zero_table_t; - - extern const __copy_table_t __copy_table_start__; - extern const __copy_table_t __copy_table_end__; - extern const __zero_table_t __zero_table_start__; - extern const __zero_table_t __zero_table_end__; - - for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) { - for(uint32_t i=0u; iwlen; ++i) { - pTable->dest[i] = pTable->src[i]; +#ifndef __PROGRAM_START + /** + \brief Initializes data and bss sections + \details This default implementations initialized all data and additional bss + sections relying on .copy.table and .zero.table specified properly + in the used linker script. + + */ + __STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) + { + extern void _start(void) __NO_RETURN; + + typedef struct __copy_table { + uint32_t const* src; + uint32_t* dest; + uint32_t wlen; + } __copy_table_t; + + typedef struct __zero_table { + uint32_t* dest; + uint32_t wlen; + } __zero_table_t; + + extern const __copy_table_t __copy_table_start__; + extern const __copy_table_t __copy_table_end__; + extern const __zero_table_t __zero_table_start__; + extern const __zero_table_t __zero_table_end__; + + for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = pTable->src[i]; + } } - } - - for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) { - for(uint32_t i=0u; iwlen; ++i) { - pTable->dest[i] = 0u; + + for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = 0u; + } } + + _start(); } - - _start(); -} - -#define __PROGRAM_START __cmsis_start + + #define __PROGRAM_START __cmsis_start #endif #ifndef __INITIAL_SP -#define __INITIAL_SP __StackTop + #define __INITIAL_SP __StackTop #endif #ifndef __STACK_LIMIT -#define __STACK_LIMIT __StackLimit + #define __STACK_LIMIT __StackLimit #endif #ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __Vectors + #define __VECTOR_TABLE __Vectors #endif #ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) + #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) #endif #if (__ARM_FEATURE_CMSE == 3) -#ifndef __STACK_SEAL -#define __STACK_SEAL __StackSeal -#endif + #ifndef __STACK_SEAL + #define __STACK_SEAL __StackSeal + #endif -#ifndef __TZ_STACK_SEAL_SIZE -#define __TZ_STACK_SEAL_SIZE 8U -#endif + #ifndef __TZ_STACK_SEAL_SIZE + #define __TZ_STACK_SEAL_SIZE 8U + #endif -#ifndef __TZ_STACK_SEAL_VALUE -#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL -#endif + #ifndef __TZ_STACK_SEAL_VALUE + #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL + #endif -__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { - *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; -} + __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; + } #endif -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions +/* 
########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions @{ -*/ - -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif - -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -#define __NOP() __ASM volatile ("nop") - /** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value */ -#define __WFI() __ASM volatile ("wfi":::"memory") - +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __ASM volatile ("wfe":::"memory") + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return (result); +} -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __ASM volatile ("sev") +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return (result); + } +#endif /** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set */ -__STATIC_FORCEINLINE void __ISB(void) +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) { - __ASM volatile ("isb 0xF":::"memory"); + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); } -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -__STATIC_FORCEINLINE void __DSB(void) -{ - __ASM volatile ("dsb 0xF":::"memory"); -} +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. 
+ \param [in] control Control Register value to set + */ + __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) + { + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); + } +#endif /** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -__STATIC_FORCEINLINE void __DMB(void) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) { - __ASM volatile ("dmb 0xF":::"memory"); + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return (result); } /** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) { - return __builtin_bswap32(value); + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return (result); } /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) { uint32_t result; - __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); return (result); } /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +__STATIC_FORCEINLINE uint32_t __get_PSP(void) { - return (int16_t)__builtin_bswap16(value); + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return (result); } -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) { - return op1; + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return (result); } - return (op1 >> op2) | (op1 << (32U - op2)); -} - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. 
- \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +#endif /** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) { - uint32_t result; + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} -#if (__ARM_ARCH_ISA_THUMB >= 2) - __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); -#else - uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ - result = value; /* r will be reversed bits of v; first get LSB of v */ - for (value >>= 1U; value != 0U; value >>= 1U) +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) { - result <<= 1U; - result |= value & 1U; - s--; + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); } - result <<= s; /* shift when v's highest bits are zero */ #endif - return (result); -} /** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) { - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); -} + uint32_t result; + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return (result); +} -#if (__ARM_FEATURE_SAT >= 1) -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(value, sat) __ssat(value, sat) +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return (result); + } +#endif -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. 
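As an aside, the __CLZ intrinsic touched by this hunk is commonly used for power-of-two rounding; a sketch under the same assumptions:

#include "ARMCM33.h"   /* assumed CMSIS device header providing CMSIS-Core */

/* Round x up to the next power of two; valid for 1 <= x <= 0x80000000UL. */
static uint32_t next_pow2(uint32_t x)
{
  return 1UL << (32U - __CLZ(x - 1U));   /* __CLZ(0) == 32, so x == 1 maps to 1 */
}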
- \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(value, sat) __usat(value, sat) -#else /* (__ARM_FEATURE_SAT >= 1) */ /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return (max); - } - else if (val < min) - { - return (min); - } - } - return (val); + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return (max); - } - else if (val < 0) - { - return (0U); - } + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); } - return ((uint32_t)val); -} -#endif /* (__ARM_FEATURE_SAT >= 1) */ +#endif -#if (__ARM_FEATURE_LDREX >= 1) -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -__STATIC_FORCEINLINE void __CLREX(void) -{ - __ASM volatile ("clrex" ::: "memory"); -} +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return (result); + } + + + /** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) + { + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); + } +#endif /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. 
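The saturation fallbacks in this hunk mirror the SSAT/USAT instructions; an illustrative use (the function names are invented for the example):

#include "ARMCM33.h"   /* assumed CMSIS device header providing CMSIS-Core */

static int16_t clamp_q15(int32_t acc)
{
  return (int16_t)__SSAT(acc, 16U);      /* saturate to -32768..32767 */
}

static uint16_t clamp_dac12(int32_t sample)
{
  return (uint16_t)__USAT(sample, 12U);  /* saturate to 0..4095 */
}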
+ \return Priority Mask value */ -__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) { uint32_t result; - __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return (result); } -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return (result); -} -#endif /* (__ARM_FEATURE_LDREX >= 1) */ +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return (result); + } +#endif -#if (__ARM_FEATURE_LDREX >= 2) /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) { - uint32_t result; - - __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); - return ((uint16_t)result); /* Add explicit type cast here */ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); } -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) -{ - uint32_t result; +#if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ + __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) + { + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + } +#endif - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return (result); -} -#endif /* (__ARM_FEATURE_LDREX >= 2) */ +#if (__ARM_ARCH_ISA_THUMB >= 2) + /** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return (result); + } -#if (__ARM_FEATURE_LDREX >= 4) -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. 
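A conventional save-and-restore critical section built on the PRIMASK accessors above (sketch only; everything except the CMSIS intrinsics is illustrative):

#include "ARMCM33.h"   /* assumed CMSIS device header providing CMSIS-Core */

static uint32_t critical_enter(void)
{
  uint32_t primask = __get_PRIMASK();  /* remember whether IRQs were already masked */
  __disable_irq();                     /* sets PRIMASK */
  return primask;
}

static void critical_exit(uint32_t primask)
{
  __set_PRIMASK(primask);              /* restore the previous masking state */
}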
- \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) -{ - uint32_t result; - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return (result); -} + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return (result); + } + #endif -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) -{ - uint32_t result; + /** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) + { + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + } - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return (result); -} -#endif /* (__ARM_FEATURE_LDREX >= 4) */ + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) + { + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + } + #endif -#if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. 
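The exclusive-access intrinsics in this hunk are normally wrapped in a retry loop; a minimal sketch under the same assumptions:

#include "ARMCM33.h"   /* assumed CMSIS device header providing CMSIS-Core */

static uint32_t atomic_increment(volatile uint32_t *counter)
{
  uint32_t value;
  do {
    value = __LDREXW(counter) + 1U;         /* load-exclusive and compute the new value */
  } while (__STREXW(value, counter) != 0U); /* store only succeeds if exclusivity was kept */
  return value;
}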
- \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - - -#if (__ARM_ARCH >= 8) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. 
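The load-acquire/store-release intrinsics above (Armv8-M and later) give one-way ordering without a full barrier; an illustrative hand-off in which every name except the intrinsics is invented:

#include "ARMCM33.h"   /* assumed CMSIS device header providing CMSIS-Core */

static volatile uint32_t shared_value;
static volatile uint32_t value_ready;

static void publish(uint32_t v)
{
  shared_value = v;
  __STL(1U, &value_ready);               /* release: the data store is ordered before the flag */
}

static uint32_t consume(void)
{
  while (__LDA(&value_ready) == 0U) { }  /* acquire: the flag read is ordered before the data read */
  return shared_value;
}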
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return (result); -} - - -/** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return (result); -} - - -/** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return (result); -} - -#endif /* (__ARM_ARCH >= 8) */ - -/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ - - -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. 
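A minimal spinlock sketch on top of the acquire/release exclusives above (illustrative only; assumes an Armv8-M CMSIS device header):

#include "ARMCM33.h"   /* assumed CMSIS device header providing CMSIS-Core */

static void spin_lock(volatile uint32_t *lock)
{
  do {
    while (__LDAEX(lock) != 0U) { }   /* spin until the lock reads as free */
  } while (__STLEX(1U, lock) != 0U);  /* claim it; retry if exclusivity was lost */
}

static void spin_unlock(volatile uint32_t *lock)
{
  __STL(0U, lock);                    /* release ordering on unlock */
}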
- */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} - - -/** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value - */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); - __ISB(); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); - __ISB(); -} -#endif - - -/** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return (result); -} - - -/** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return (result); -} - - -/** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return (result); -} - - -/** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). 
- \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif - - -/** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} -#endif - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return (result); -} - - -/** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif - - -/** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. 
- \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) -{ - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} -#endif - - -#if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - - -/** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) -{ - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} -#endif - - -/** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); -} + /** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) + { + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + } -/** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. 
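__set_BASEPRI_MAX, re-indented above, only ever raises the masking threshold; a hedged usage sketch (the threshold value is an assumption and depends on __NVIC_PRIO_BITS):

#include "ARMCM33.h"   /* assumed CMSIS device header providing CMSIS-Core */

static void run_with_raised_threshold(void (*work)(void))
{
  uint32_t old = __get_BASEPRI();
  __set_BASEPRI_MAX(0x40U);  /* mask interrupts with priority value 0x40 or higher */
  work();
  __set_BASEPRI(old);        /* restore the previous threshold */
}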
- \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) -{ - uint32_t result; - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); - return (result); -} + /** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return (result); + } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); -} - + /** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return (result); + } + #endif + + + /** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + } -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} -#endif + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + } + #endif #endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ #if (__ARM_ARCH >= 8) -/** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + /** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). 
+ \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return (result); -#endif -} - + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return (result); + #endif + } + #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return (result); -#endif -} -#endif - + /** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return (result); + #endif + } + #endif -/** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + /** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
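A short sketch of how an RTOS port might pair PSPLIM with PSP when preparing a task stack (names are illustrative; Armv8-M Main Extension assumed, since Baseline ignores the write as noted above):

#include "ARMCM33.h"   /* assumed CMSIS device header providing CMSIS-Core */

static void prepare_task_stack(uint32_t *stack_lowest, uint32_t stack_words)
{
  __set_PSPLIM((uint32_t)stack_lowest);               /* lowest address the PSP may reach */
  __set_PSP((uint32_t)(stack_lowest + stack_words));  /* initial top of the task stack */
}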
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif -} - - + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); + #endif + } + + #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif -} -#endif - - -/** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + /** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); + #endif + } + #endif + + + /** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return (result); -#endif -} - - + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return (result); + #endif + } + + #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. 
Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return (result); -#endif -} -#endif - - -/** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + /** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return (result); + #endif + } + #endif + + + /** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif -} - - + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); + #endif + } + + #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif - + /** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. 
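MSPLIM is normally armed once, early in start-up; a sketch that leans on an assumed linker symbol:

#include "ARMCM33.h"    /* assumed CMSIS device header providing CMSIS-Core */

extern uint32_t __StackLimit;   /* assumed linker-provided symbol: lowest main-stack address */

static void init_main_stack_guard(void)
{
  __set_MSPLIM((uint32_t)&__StackLimit);  /* a main-stack overflow now raises a fault */
}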
+ + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); + #endif + } + #endif #endif /* (__ARM_ARCH >= 8) */ -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ -#if (__ARM_FP >= 1) - return (__builtin_arm_get_fpscr()); -#else - return (0U); -#endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ -#if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); -#else - (void)fpscr; -#endif -} - - -/** @} end of CMSIS_Core_RegAccFunctions */ - - -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (__ARM_FEATURE_DSP == 1) -#define __SADD8 __sadd8 -#define __QADD8 __qadd8 -#define __SHADD8 __shadd8 -#define __UADD8 __uadd8 -#define __UQADD8 __uqadd8 -#define __UHADD8 __uhadd8 -#define __SSUB8 __ssub8 -#define __QSUB8 __qsub8 -#define __SHSUB8 __shsub8 -#define __USUB8 __usub8 -#define __UQSUB8 __uqsub8 -#define __UHSUB8 __uhsub8 -#define __SADD16 __sadd16 -#define __QADD16 __qadd16 -#define __SHADD16 __shadd16 -#define __UADD16 __uadd16 -#define __UQADD16 __uqadd16 -#define __UHADD16 __uhadd16 -#define __SSUB16 __ssub16 -#define __QSUB16 __qsub16 -#define __SHSUB16 __shsub16 -#define __USUB16 __usub16 -#define __UQSUB16 __uqsub16 -#define __UHSUB16 __uhsub16 -#define __SASX __sasx -#define __QASX __qasx -#define __SHASX __shasx -#define __UASX __uasx -#define __UQASX __uqasx -#define __UHASX __uhasx -#define __SSAX __ssax -#define __QSAX __qsax -#define __SHSAX __shsax -#define __USAX __usax -#define __UQSAX __uqsax -#define __UHSAX __uhsax -#define __USAD8 __usad8 -#define __USADA8 __usada8 -#define __SSAT16 __ssat16 -#define __USAT16 __usat16 -#define __UXTB16 __uxtb16 -#define __UXTAB16 __uxtab16 -#define __SXTB16 __sxtb16 -#define __SXTAB16 __sxtab16 -#define __SMUAD __smuad -#define __SMUADX __smuadx -#define __SMLAD __smlad -#define __SMLADX __smladx -#define __SMLALD __smlald -#define __SMLALDX __smlaldx -#define __SMUSD __smusd -#define __SMUSDX __smusdx -#define __SMLSD __smlsd -#define __SMLSDX __smlsdx -#define __SMLSLD __smlsld -#define __SMLSLDX __smlsldx -#define __SEL __sel -#define __QADD __qadd -#define __QSUB __qsub - -#define __PKHBT(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __PKHTB(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - if (ARG3 == 0) \ - __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ - else \ - __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - 
__RES; \ - }) - -#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) - -#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return (result); -} -#endif /* (__ARM_FEATURE_DSP == 1) */ -/** @} end of group CMSIS_SIMD_intrinsics */ +/*@} end of CMSIS_Core_RegAccFunctions */ #pragma GCC diagnostic pop diff --git a/CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h b/CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h index 82f3b2378..29faeb48a 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_iccarm_m.h * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file - * @version V5.4.0 - * @date 20. January 2023 + * @version V6.0.0 + * @date 4. August 2023 ******************************************************************************/ //------------------------------------------------------------------------------ @@ -29,292 +29,54 @@ #ifndef __CMSIS_ICCARM_M_H__ #define __CMSIS_ICCARM_M_H__ -#ifndef __ICCARM__ - #error This file should only be compiled by ICCARM -#endif - -#pragma system_include - -#define __IAR_FT _Pragma("inline=forced") __intrinsic - -#if (__VER__ >= 8000000) - #define __ICCARM_V8 1 -#else - #define __ICCARM_V8 0 -#endif - -#ifndef __ALIGNED - #if __ICCARM_V8 - #define __ALIGNED(x) __attribute__((aligned(x))) - #elif (__VER__ >= 7080000) - /* Needs IAR language extensions */ - #define __ALIGNED(x) __attribute__((aligned(x))) - #else - #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. - #define __ALIGNED(x) - #endif -#endif - - -/* Define compiler macros for CPU architecture, used in CMSIS 5. - */ -#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ || __ARM_ARCH_8M_BASE__ || __ARM_ARCH_8M_MAIN__ -/* Macros already defined */ -#else - #if defined(__ARM8M_MAINLINE__) || defined(__ARM8EM_MAINLINE__) - #define __ARM_ARCH_8M_MAIN__ 1 - #elif defined(__ARM8M_BASELINE__) - #define __ARM_ARCH_8M_BASE__ 1 - #elif defined(__ARM_ARCH_PROFILE) && __ARM_ARCH_PROFILE == 'M' - #if __ARM_ARCH == 6 - #define __ARM_ARCH_6M__ 1 - #elif __ARM_ARCH == 7 - #if __ARM_FEATURE_DSP - #define __ARM_ARCH_7EM__ 1 - #else - #define __ARM_ARCH_7M__ 1 - #endif - #endif /* __ARM_ARCH */ - #endif /* __ARM_ARCH_PROFILE == 'M' */ -#endif - -/* Alternativ core deduction for older ICCARM's */ -#if !defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__) && \ - !defined(__ARM_ARCH_8M_BASE__) && !defined(__ARM_ARCH_8M_MAIN__) - #if defined(__ARM6M__) && (__CORE__ == __ARM6M__) - #define __ARM_ARCH_6M__ 1 - #elif defined(__ARM7M__) && (__CORE__ == __ARM7M__) - #define __ARM_ARCH_7M__ 1 - #elif defined(__ARM7EM__) && (__CORE__ == __ARM7EM__) - #define __ARM_ARCH_7EM__ 1 - #elif defined(__ARM8M_BASELINE__) && (__CORE == __ARM8M_BASELINE__) - #define __ARM_ARCH_8M_BASE__ 1 - #elif defined(__ARM8M_MAINLINE__) && (__CORE == __ARM8M_MAINLINE__) - #define __ARM_ARCH_8M_MAIN__ 1 - #elif defined(__ARM8EM_MAINLINE__) && (__CORE == __ARM8EM_MAINLINE__) - #define __ARM_ARCH_8M_MAIN__ 1 - #else - #error "Unknown target." 
- #endif -#endif - - - -#if defined(__ARM_ARCH_6M__) && __ARM_ARCH_6M__==1 - #define __IAR_M0_FAMILY 1 -#elif defined(__ARM_ARCH_8M_BASE__) && __ARM_ARCH_8M_BASE__==1 - #define __IAR_M0_FAMILY 1 -#else - #define __IAR_M0_FAMILY 0 -#endif - -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) -#endif - -#ifndef __ASM - #define __ASM __asm -#endif - -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif - -#ifndef __INLINE - #define __INLINE inline -#endif - -#ifndef __NO_RETURN - #if __ICCARM_V8 - #define __NO_RETURN __attribute__((__noreturn__)) - #else - #define __NO_RETURN _Pragma("object_attribute=__noreturn") - #endif -#endif - -#ifndef __PACKED - #if __ICCARM_V8 - #define __PACKED __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED __packed - #endif -#endif - -#ifndef __PACKED_STRUCT - #if __ICCARM_V8 - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED_STRUCT __packed struct - #endif -#endif - -#ifndef __PACKED_UNION - #if __ICCARM_V8 - #define __PACKED_UNION union __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED_UNION __packed union - #endif -#endif - -#ifndef __RESTRICT - #if __ICCARM_V8 - #define __RESTRICT __restrict - #else - /* Needs IAR language extensions */ - #define __RESTRICT restrict - #endif -#endif - -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif - -#ifndef __FORCEINLINE - #define __FORCEINLINE _Pragma("inline=forced") -#endif - -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE -#endif - -#ifndef __UNALIGNED_UINT16_READ -#pragma language=save -#pragma language=extended -__IAR_FT uint16_t __iar_uint16_read(void const *ptr) -{ - return *(__packed uint16_t*)(ptr); -} -#pragma language=restore -#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) -#endif - - -#ifndef __UNALIGNED_UINT16_WRITE -#pragma language=save -#pragma language=extended -__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) -{ - *(__packed uint16_t*)(ptr) = val;; -} -#pragma language=restore -#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) -#endif - -#ifndef __UNALIGNED_UINT32_READ -#pragma language=save -#pragma language=extended -__IAR_FT uint32_t __iar_uint32_read(void const *ptr) -{ - return *(__packed uint32_t*)(ptr); -} -#pragma language=restore -#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) -#endif - -#ifndef __UNALIGNED_UINT32_WRITE -#pragma language=save -#pragma language=extended -__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) -{ - *(__packed uint32_t*)(ptr) = val;; -} -#pragma language=restore -#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) +#ifndef __CMSIS_ICCARM_H__ + #error "This file must not be included directly" #endif -#ifndef __UNALIGNED_UINT32 /* deprecated */ -#pragma language=save -#pragma language=extended -__packed struct __iar_u32 { uint32_t v; }; -#pragma language=restore -#define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) -#endif - -#ifndef __USED - #if __ICCARM_V8 - #define __USED __attribute__((used)) - #else - #define __USED _Pragma("__root") - #endif +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM #endif -#undef __WEAK /* undo the definition from DLib_Defaults.h 
*/ -#ifndef __WEAK - #if __ICCARM_V8 - #define __WEAK __attribute__((weak)) - #else - #define __WEAK _Pragma("__weak") - #endif -#endif #ifndef __PROGRAM_START -#define __PROGRAM_START __iar_program_start + #define __PROGRAM_START __iar_program_start #endif #ifndef __INITIAL_SP -#define __INITIAL_SP CSTACK$$Limit + #define __INITIAL_SP CSTACK$$Limit #endif #ifndef __STACK_LIMIT -#define __STACK_LIMIT CSTACK$$Base + #define __STACK_LIMIT CSTACK$$Base #endif #ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __vector_table + #define __VECTOR_TABLE __vector_table #endif #ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE @".intvec" + #define __VECTOR_TABLE_ATTRIBUTE @".intvec" #endif #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) -#ifndef __STACK_SEAL -#define __STACK_SEAL STACKSEAL$$Base -#endif - -#ifndef __TZ_STACK_SEAL_SIZE -#define __TZ_STACK_SEAL_SIZE 8U -#endif + #ifndef __STACK_SEAL + #define __STACK_SEAL STACKSEAL$$Base + #endif -#ifndef __TZ_STACK_SEAL_VALUE -#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL -#endif + #ifndef __TZ_STACK_SEAL_SIZE + #define __TZ_STACK_SEAL_SIZE 8U + #endif -__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { - *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; -} -#endif + #ifndef __TZ_STACK_SEAL_VALUE + #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL + #endif -#ifndef __ICCARM_INTRINSICS_VERSION__ - #define __ICCARM_INTRINSICS_VERSION__ 0 + __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; + } #endif #if __ICCARM_INTRINSICS_VERSION__ == 2 - - #if defined(__CLZ) - #undef __CLZ - #endif - #if defined(__REVSH) - #undef __REVSH - #endif - #if defined(__RBIT) - #undef __RBIT - #endif - #if defined(__SSAT) - #undef __SSAT - #endif - #if defined(__USAT) - #undef __USAT - #endif - - #include "iccarm_builtin.h" - #define __disable_fault_irq __iar_builtin_disable_fiq #define __disable_irq __iar_builtin_disable_interrupt #define __enable_fault_irq __iar_builtin_enable_fiq @@ -362,11 +124,11 @@ __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE))) #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE))) -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __arm_wsr("CONTROL", control); - __iar_builtin_ISB(); -} + __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) + { + __arm_wsr("CONTROL", control); + __iar_builtin_ISB(); + } #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE))) #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE))) @@ -390,11 +152,11 @@ __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS")) -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __arm_wsr("CONTROL_NS", control); - __iar_builtin_ISB(); -} + __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) + { + __arm_wsr("CONTROL_NS", control); + __iar_builtin_ISB(); + } #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS")) #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE))) @@ -422,124 +184,6 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) #define __TZ_get_MSPLIM_NS() (__arm_rsr("MSPLIM_NS")) #define __TZ_set_MSPLIM_NS(VALUE) (__arm_wsr("MSPLIM_NS", (VALUE))) - #define __NOP __iar_builtin_no_operation - - #define __CLZ __iar_builtin_CLZ - #define __CLREX __iar_builtin_CLREX - - #define __DMB __iar_builtin_DMB - #define __DSB __iar_builtin_DSB - #define 
__ISB __iar_builtin_ISB - - #define __LDREXB __iar_builtin_LDREXB - #define __LDREXH __iar_builtin_LDREXH - #define __LDREXW __iar_builtin_LDREX - - #define __RBIT __iar_builtin_RBIT - #define __REV __iar_builtin_REV - #define __REV16 __iar_builtin_REV16 - - __IAR_FT int16_t __REVSH(int16_t val) - { - return (int16_t) __iar_builtin_REVSH(val); - } - - #define __ROR __iar_builtin_ROR - #define __RRX __iar_builtin_RRX - - #define __SEV __iar_builtin_SEV - - #if !__IAR_M0_FAMILY - #define __SSAT __iar_builtin_SSAT - #endif - - #define __STREXB __iar_builtin_STREXB - #define __STREXH __iar_builtin_STREXH - #define __STREXW __iar_builtin_STREX - - #if !__IAR_M0_FAMILY - #define __USAT __iar_builtin_USAT - #endif - - #define __WFE __iar_builtin_WFE - #define __WFI __iar_builtin_WFI - - #if __ARM_MEDIA__ - #define __SADD8 __iar_builtin_SADD8 - #define __QADD8 __iar_builtin_QADD8 - #define __SHADD8 __iar_builtin_SHADD8 - #define __UADD8 __iar_builtin_UADD8 - #define __UQADD8 __iar_builtin_UQADD8 - #define __UHADD8 __iar_builtin_UHADD8 - #define __SSUB8 __iar_builtin_SSUB8 - #define __QSUB8 __iar_builtin_QSUB8 - #define __SHSUB8 __iar_builtin_SHSUB8 - #define __USUB8 __iar_builtin_USUB8 - #define __UQSUB8 __iar_builtin_UQSUB8 - #define __UHSUB8 __iar_builtin_UHSUB8 - #define __SADD16 __iar_builtin_SADD16 - #define __QADD16 __iar_builtin_QADD16 - #define __SHADD16 __iar_builtin_SHADD16 - #define __UADD16 __iar_builtin_UADD16 - #define __UQADD16 __iar_builtin_UQADD16 - #define __UHADD16 __iar_builtin_UHADD16 - #define __SSUB16 __iar_builtin_SSUB16 - #define __QSUB16 __iar_builtin_QSUB16 - #define __SHSUB16 __iar_builtin_SHSUB16 - #define __USUB16 __iar_builtin_USUB16 - #define __UQSUB16 __iar_builtin_UQSUB16 - #define __UHSUB16 __iar_builtin_UHSUB16 - #define __SASX __iar_builtin_SASX - #define __QASX __iar_builtin_QASX - #define __SHASX __iar_builtin_SHASX - #define __UASX __iar_builtin_UASX - #define __UQASX __iar_builtin_UQASX - #define __UHASX __iar_builtin_UHASX - #define __SSAX __iar_builtin_SSAX - #define __QSAX __iar_builtin_QSAX - #define __SHSAX __iar_builtin_SHSAX - #define __USAX __iar_builtin_USAX - #define __UQSAX __iar_builtin_UQSAX - #define __UHSAX __iar_builtin_UHSAX - #define __USAD8 __iar_builtin_USAD8 - #define __USADA8 __iar_builtin_USADA8 - #define __SSAT16 __iar_builtin_SSAT16 - #define __USAT16 __iar_builtin_USAT16 - #define __UXTB16 __iar_builtin_UXTB16 - #define __UXTAB16 __iar_builtin_UXTAB16 - #define __SXTB16 __iar_builtin_SXTB16 - #define __SXTAB16 __iar_builtin_SXTAB16 - #define __SMUAD __iar_builtin_SMUAD - #define __SMUADX __iar_builtin_SMUADX - #define __SMMLA __iar_builtin_SMMLA - #define __SMLAD __iar_builtin_SMLAD - #define __SMLADX __iar_builtin_SMLADX - #define __SMLALD __iar_builtin_SMLALD - #define __SMLALDX __iar_builtin_SMLALDX - #define __SMUSD __iar_builtin_SMUSD - #define __SMUSDX __iar_builtin_SMUSDX - #define __SMLSD __iar_builtin_SMLSD - #define __SMLSDX __iar_builtin_SMLSDX - #define __SMLSLD __iar_builtin_SMLSLD - #define __SMLSLDX __iar_builtin_SMLSLDX - #define __SEL __iar_builtin_SEL - #define __QADD __iar_builtin_QADD - #define __QSUB __iar_builtin_QSUB - #define __PKHBT __iar_builtin_PKHBT - #define __PKHTB __iar_builtin_PKHTB - #endif - -#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ - - #if __IAR_M0_FAMILY - /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. 
*/ - #define __CLZ __cmsis_iar_clz_not_active - #define __SSAT __cmsis_iar_ssat_not_active - #define __USAT __cmsis_iar_usat_not_active - #define __RBIT __cmsis_iar_rbit_not_active - #define __get_APSR __cmsis_iar_get_APSR_not_active - #endif - #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) @@ -548,7 +192,7 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) #endif #ifdef __INTRINSICS_INCLUDED - #error intrinsics.h is already included previously! + #error intrinsics.h is already included previously! #endif #include @@ -596,8 +240,8 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) return res; } - #endif - + #endif /* __IAR_M0_FAMILY */ +#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) #undef __get_FPSCR @@ -615,30 +259,8 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) #define __get_xPSR __get_PSR - #if (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0) - - __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) - { - return __LDREX((unsigned long *)ptr); - } - - __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) - { - return __STREX(value, (unsigned long *)ptr); - } - #endif - - /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */ #if (__CORTEX_M >= 0x03) - - __IAR_FT uint32_t __RRX(uint32_t value) - { - uint32_t result; - __ASM volatile("RRX %0, %1" : "=r"(result) : "r" (value)); - return(result); - } - __IAR_FT void __set_BASEPRI_MAX(uint32_t value) { __asm volatile("MSR BASEPRI_MAX,%0"::"r" (value)); @@ -647,15 +269,8 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) #define __enable_fault_irq __enable_fiq #define __disable_fault_irq __disable_fiq - - #endif /* (__CORTEX_M >= 0x03) */ - __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2) - { - return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2)); - } - #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) @@ -827,173 +442,15 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) __asm volatile("MSR MSPLIM_NS,%0" :: "r" (value)); } - #endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ - + #endif /* (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ #endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */ -#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value)) #if __IAR_M0_FAMILY - __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) - { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; - } - __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) - { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; - } #endif -#if (__CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. 
*/ - - __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) - { - uint32_t res; - __ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); - return ((uint8_t)res); - } - - __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) - { - uint32_t res; - __ASM volatile ("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); - return ((uint16_t)res); - } - - __IAR_FT uint32_t __LDRT(volatile uint32_t *addr) - { - uint32_t res; - __ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); - return res; - } - - __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) - { - __ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); - } - - __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) - { - __ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); - } - - __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) - { - __ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); - } - -#endif /* (__CORTEX_M >= 0x03) */ - -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) - - - __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return ((uint8_t)res); - } - - __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return ((uint16_t)res); - } - - __IAR_FT uint32_t __LDA(volatile uint32_t *ptr) - { - uint32_t res; - __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return res; - } - - __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr) - { - __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); - } - - __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr) - { - __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); - } - - __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr) - { - __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); - } - - __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return ((uint8_t)res); - } - - __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return ((uint16_t)res); - } - - __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return res; - } - - __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) - { - uint32_t res; - __ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); - return res; - } - - __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) - { - uint32_t res; - __ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); - return res; - } - - __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) - { - uint32_t res; - __ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); - return res; - } - -#endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ - #undef __IAR_FT #undef __IAR_M0_FAMILY #undef __ICCARM_V8 diff --git a/CMSIS/Core/Include/m-profile/cmsis_tiarmclang_m.h b/CMSIS/Core/Include/m-profile/cmsis_tiarmclang_m.h index 1230a2f78..d0876228b 100644 --- 
a/CMSIS/Core/Include/m-profile/cmsis_tiarmclang_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_tiarmclang_m.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_tiarmclang_m.h * @brief CMSIS compiler tiarmclang header file - * @version V1.1.0 - * @date 27. July 2023 + * @version V6.0.0 + * @date 04. August 2023 ******************************************************************************/ /* * Copyright (c) 2023 Arm Limited. All rights reserved. @@ -27,88 +27,12 @@ #pragma clang system_header /* treat file as system include file */ -#if (__ARM_ACLE >= 200) - #include -#else - #error Compiler must support ACLE V2.0 -#endif /* (__ARM_ACLE >= 200) */ - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE __inline -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static __inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_UNION - #define __PACKED_UNION union __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif -#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".bss.noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) +#ifndef __CMSIS_TIARMCLANG_H + #error "This file must not be included directly" #endif /* ######################### Startup and Lowlevel Init ######################## */ + #ifndef __PROGRAM_START #define __PROGRAM_START _c_int00 #endif @@ -142,575 +66,18 @@ #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL #endif - __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { *((uint64_t *)stackTop) = 
__TZ_STACK_SEAL_VALUE; } #endif -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ - -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif - -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __nop() - - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __wfi() - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __wfe() - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __sev() - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -#define __ISB() __isb(0xF) - - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -#define __DSB() __dsb(0xF) - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -#define __DMB() __dmb(0xF) - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV(value) __rev(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV16(value) __rev16(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REVSH(value) __revsh(value) - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -#define __ROR(op1, op2) __ror(op1, op2) - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. 
- If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -#define __RBIT(value) __rbit(value) - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -#define __CLZ(value) __clz(value) - - -/* __ARM_FEATURE_SAT is wrong for for Armv8-M Baseline devices */ -#if ((__ARM_FEATURE_SAT >= 1) && \ - (__ARM_ARCH_ISA_THUMB >= 2) ) -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(value, sat) __ssat(value, sat) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(value, sat) __usat(value, sat) - -#else /* (__ARM_FEATURE_SAT >= 1) */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) -{ - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return (max); - } - else if (val < min) - { - return (min); - } - } - return (val); -} - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return (max); - } - else if (val < 0) - { - return (0U); - } - } - return ((uint32_t)val); -} -#endif /* (__ARM_FEATURE_SAT >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 1) -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -#define __CLREX __builtin_arm_clrex - - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXB (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 2) -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXH (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 2) */ - - -#if (__ARM_FEATURE_LDREX >= 4) -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXW (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 4) */ - - -#if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - - -#if (__ARM_ARCH >= 8) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - - -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXB (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEX (uint32_t)__builtin_arm_stlex - -#endif /* (__ARM_ARCH >= 8) */ - -/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ - - /* ########################### Core Function Access ########################### */ /** \ingroup CMSIS_Core_FunctionInterface \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions @{ */ -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -#ifndef __ARM_COMPAT_H -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} -#endif - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -#ifndef __ARM_COMPAT_H -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} -#endif - /** \brief Get Control Register @@ -1000,454 +367,309 @@ __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) #if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - - -/** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) -{ - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. 
- \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} -#endif - - -/** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); -} - - -/** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); - return (result); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return (result); -} -#endif - - -/** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} -#endif - + /** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return (result); + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return (result); + } + #endif + + + /** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) + { + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) + { + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + } + #endif + + + /** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. 
+ \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) + { + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + } + + + /** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return (result); + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return (result); + } + #endif + + + /** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + } + #endif #endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ #if (__ARM_ARCH >= 8) -/** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return (result); -#endif -} - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return (result); -#endif -} -#endif - - -/** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
- \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif -} -#endif - - -/** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return (result); -#endif -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return (result); -#endif -} -#endif - - -/** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) -{ -#if (((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif -} - - -#if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. 
- - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif - + /** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return (result); + #endif + } + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return (result); + #endif + } + #endif + + + /** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); + #endif + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); + #endif + } + #endif + + + /** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. 
Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return (result); + #endif + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return (result); + #endif + } + #endif + + + /** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) + { + #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ + (__ARM_FEATURE_CMSE < 3) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); + #endif + } + + + #if (__ARM_FEATURE_CMSE == 3) + /** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); + #endif + } + #endif #endif /* (__ARM_ARCH >= 8) */ - - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ -#if (__ARM_FP >= 1) - return (__builtin_arm_get_fpscr()); -#else - return (0U); -#endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. 
- \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ -#if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); -#else - (void)fpscr; -#endif -} - - /** @} end of CMSIS_Core_RegAccFunctions */ -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (__ARM_FEATURE_DSP == 1) -#define __SADD8 __sadd8 -#define __QADD8 __qadd8 -#define __SHADD8 __shadd8 -#define __UADD8 __uadd8 -#define __UQADD8 __uqadd8 -#define __UHADD8 __uhadd8 -#define __SSUB8 __ssub8 -#define __QSUB8 __qsub8 -#define __SHSUB8 __shsub8 -#define __USUB8 __usub8 -#define __UQSUB8 __uqsub8 -#define __UHSUB8 __uhsub8 -#define __SADD16 __sadd16 -#define __QADD16 __qadd16 -#define __SHADD16 __shadd16 -#define __UADD16 __uadd16 -#define __UQADD16 __uqadd16 -#define __UHADD16 __uhadd16 -#define __SSUB16 __ssub16 -#define __QSUB16 __qsub16 -#define __SHSUB16 __shsub16 -#define __USUB16 __usub16 -#define __UQSUB16 __uqsub16 -#define __UHSUB16 __uhsub16 -#define __SASX __sasx -#define __QASX __qasx -#define __SHASX __shasx -#define __UASX __uasx -#define __UQASX __uqasx -#define __UHASX __uhasx -#define __SSAX __ssax -#define __QSAX __qsax -#define __SHSAX __shsax -#define __USAX __usax -#define __UQSAX __uqsax -#define __UHSAX __uhsax -#define __USAD8 __usad8 -#define __USADA8 __usada8 -#define __SSAT16 __ssat16 -#define __USAT16 __usat16 -#define __UXTB16 __uxtb16 -#define __UXTAB16 __uxtab16 -#define __SXTB16 __sxtb16 -#define __SXTAB16 __sxtab16 -#define __SMUAD __smuad -#define __SMUADX __smuadx -#define __SMLAD __smlad -#define __SMLADX __smladx -#define __SMLALD __smlald -#define __SMLALDX __smlaldx -#define __SMUSD __smusd -#define __SMUSDX __smusdx -#define __SMLSD __smlsd -#define __SMLSDX __smlsdx -#define __SMLSLD __smlsld -#define __SMLSLDX __smlsldx -#define __SEL __sel -#define __QADD __qadd -#define __QSUB __qsub - -#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ - ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) - -#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ - ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) - -#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) - -#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return (result); -} - -#endif /* (__ARM_FEATURE_DSP == 1) */ -/** @} end of group CMSIS_SIMD_intrinsics */ - - #endif /* __CMSIS_TIARMCLANG_M_H */ diff --git a/CMSIS/Core/Include/r-profile/armv7r.h b/CMSIS/Core/Include/r-profile/armv7r.h new file mode 100644 index 000000000..25952ff6e --- /dev/null +++ b/CMSIS/Core/Include/r-profile/armv7r.h @@ -0,0 +1,471 @@ +/**************************************************************************//** + * @file armv7r.h + * @brief CMSIS Cortex-R Core Peripheral Access Layer Header File for ARMv7-R + * @version V1.0.0 + * @date 2. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __ARM_V7R_GENERIC +#define __ARM_V7R_GENERIC + +#ifdef __cplusplus + extern "C" { +#endif + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ + /** + \ingroup ARMv7-R + @{ + */ + +#include "cmsis_version.h" + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TMS470__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ +#include "armv7r_cp15.h" + +#ifdef __cplusplus +} +#endif + +#endif /* __ARM_V7R_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __ARM_V7R_DEPENDANT +#define __ARM_V7R_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + + /* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" 
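/*
 * Illustrative sketch only: the macro names below follow the __CHECK_DEVICE_DEFINES
 * checks in this file, but the values are assumptions for a hypothetical Cortex-R5
 * device, not taken from the CMSIS sources. A device header would normally supply
 * these configuration macros before including the core header, so that none of the
 * defaults and warnings in this block are triggered:
 *
 *   #define __CHECK_DEVICE_DEFINES 1U
 *   #define __FPU_PRESENT          1U
 *   #define __VIC_PRESENT          1U
 *   #define __GIC_PRESENT          0U   // at most one interrupt controller may be selected
 *   #define __MPU_PRESENT          1U
 *   #define __ICACHE_PRESENT       1U
 *   #define __DCACHE_PRESENT       1U
 *   #include "core_cr5.h"             // device-specific core header (expected to pull in armv7r.h)
 */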
+ #endif + + #ifndef __VIC_PRESENT + #define __VIC_PRESENT 0U + #warning "__VIC_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __GIC_PRESENT + #define __GIC_PRESENT 1U + #warning "__GIC_PRESENT not defined in device header file; using default!" + #endif + + #if (__GIC_PRESENT == 1U) && (__VIC_PRESENT == 1U) + #error "Only one Interrupt Controller can be used" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ECC_PRESENT + #define __ECC_PRESENT 0U + #warning "__ECC_PRESENT not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +#ifdef __cplusplus + #define __I volatile /*!< \brief Defines 'read only' permissions */ +#else + #define __I volatile const /*!< \brief Defines 'read only' permissions */ +#endif +#define __O volatile /*!< \brief Defines 'write only' permissions */ +#define __IO volatile /*!< \brief Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */ +#define __OM volatile /*!< \brief Defines 'write only' structure member permissions */ +#define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */ +#define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas + +/** @} end of group ARMv7-R */ + + + + /******************************************************************************* + * Register Abstraction + Core Register contain: + - CPSR + - CP15 Registers + - L2C-310 Cache Controller + - Generic Interrupt Controller Distributor + - Generic Interrupt Controller Interface + ******************************************************************************/ + +/* Core Register CPSR */ +typedef union +{ + struct + { + uint32_t M:5; /*!< \brief bit: 0.. 
4 Mode field */ + uint32_t T:1; /*!< \brief bit: 5 Thumb execution state bit */ + uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */ + uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */ + uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */ + uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */ + uint32_t IT1:6; /*!< \brief bit: 10..15 If-Then execution state bits 2-7 */ + uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */ + RESERVED(0:4, uint32_t) + uint32_t J:1; /*!< \brief bit: 24 Jazelle bit */ + uint32_t IT0:2; /*!< \brief bit: 25..26 If-Then execution state bits 0-1 */ + uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */ + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CPSR_Type; + + + +/* CPSR Register Definitions */ +#define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */ +#define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */ + +#define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */ +#define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */ + +#define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */ +#define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */ + +#define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */ +#define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */ + +#define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */ +#define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */ + +#define CPSR_IT0_Pos 25U /*!< \brief CPSR: IT0 Position */ +#define CPSR_IT0_Msk (3UL << CPSR_IT0_Pos) /*!< \brief CPSR: IT0 Mask */ + +#define CPSR_J_Pos 24U /*!< \brief CPSR: J Position */ +#define CPSR_J_Msk (1UL << CPSR_J_Pos) /*!< \brief CPSR: J Mask */ + +#define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */ +#define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */ + +#define CPSR_IT1_Pos 10U /*!< \brief CPSR: IT1 Position */ +#define CPSR_IT1_Msk (0x3FUL << CPSR_IT1_Pos) /*!< \brief CPSR: IT1 Mask */ + +#define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */ +#define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */ + +#define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */ +#define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */ + +#define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */ +#define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */ + +#define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */ +#define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */ + +#define CPSR_T_Pos 5U /*!< \brief CPSR: T Position */ +#define CPSR_T_Msk (1UL << CPSR_T_Pos) /*!< \brief CPSR: T Mask */ + +#define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */ +#define CPSR_M_Msk (0x1FUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */ + +#define CPSR_M_USR 0x10U /*!< \brief CPSR: M User mode (PL0) */ +#define CPSR_M_FIQ 0x11U /*!< \brief CPSR: M Fast Interrupt mode (PL1) */ +#define CPSR_M_IRQ 0x12U /*!< \brief CPSR: M Interrupt mode (PL1) */ +#define CPSR_M_SVC 0x13U /*!< \brief CPSR: M Supervisor mode (PL1) */ +#define CPSR_M_MON 0x16U /*!< \brief CPSR: M Monitor mode (PL1) */ +#define CPSR_M_ABT 0x17U /*!< \brief CPSR: M Abort mode (PL1) */ +#define CPSR_M_HYP 0x1AU /*!< \brief CPSR: M Hypervisor mode (PL2) */ +#define CPSR_M_UND 0x1BU /*!< \brief 
CPSR: M Undefined mode (PL1) */ +#define CPSR_M_SYS 0x1FU /*!< \brief CPSR: M System mode (PL1) */ + +/* CP15 Register SCTLR */ +typedef union +{ + struct + { + uint32_t M:1; /*!< \brief bit: 0 MMU enable */ + uint32_t A:1; /*!< \brief bit: 1 Alignment check enable */ + uint32_t C:1; /*!< \brief bit: 2 Cache enable */ + RESERVED(0:2, uint32_t) + uint32_t CP15BEN:1; /*!< \brief bit: 5 CP15 barrier enable */ + RESERVED(1:1, uint32_t) + uint32_t B:1; /*!< \brief bit: 7 Endianness model */ + RESERVED(2:2, uint32_t) + uint32_t SW:1; /*!< \brief bit: 10 SWP and SWPB enable */ + uint32_t Z:1; /*!< \brief bit: 11 Branch prediction enable */ + uint32_t I:1; /*!< \brief bit: 12 Instruction cache enable */ + uint32_t V:1; /*!< \brief bit: 13 Vectors bit */ + uint32_t RR:1; /*!< \brief bit: 14 Round Robin select */ + RESERVED(3:2, uint32_t) + uint32_t HA:1; /*!< \brief bit: 17 Hardware Access flag enable */ + RESERVED(4:1, uint32_t) + uint32_t WXN:1; /*!< \brief bit: 19 Write permission implies XN */ + uint32_t UWXN:1; /*!< \brief bit: 20 Unprivileged write permission implies PL1 XN */ + uint32_t FI:1; /*!< \brief bit: 21 Fast interrupts configuration enable */ + uint32_t U:1; /*!< \brief bit: 22 Alignment model */ + RESERVED(5:1, uint32_t) + uint32_t VE:1; /*!< \brief bit: 24 Interrupt Vectors Enable */ + uint32_t EE:1; /*!< \brief bit: 25 Exception Endianness */ + RESERVED(6:1, uint32_t) + uint32_t NMFI:1; /*!< \brief bit: 27 Non-maskable FIQ (NMFI) support */ + uint32_t TRE:1; /*!< \brief bit: 28 TEX remap enable. */ + uint32_t AFE:1; /*!< \brief bit: 29 Access flag enable */ + uint32_t TE:1; /*!< \brief bit: 30 Thumb Exception enable */ + RESERVED(7:1, uint32_t) + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} SCTLR_Type; + +#define SCTLR_TE_Pos 30U /*!< \brief SCTLR: TE Position */ +#define SCTLR_TE_Msk (1UL << SCTLR_TE_Pos) /*!< \brief SCTLR: TE Mask */ + +#define SCTLR_AFE_Pos 29U /*!< \brief SCTLR: AFE Position */ +#define SCTLR_AFE_Msk (1UL << SCTLR_AFE_Pos) /*!< \brief SCTLR: AFE Mask */ + +#define SCTLR_TRE_Pos 28U /*!< \brief SCTLR: TRE Position */ +#define SCTLR_TRE_Msk (1UL << SCTLR_TRE_Pos) /*!< \brief SCTLR: TRE Mask */ + +#define SCTLR_NMFI_Pos 27U /*!< \brief SCTLR: NMFI Position */ +#define SCTLR_NMFI_Msk (1UL << SCTLR_NMFI_Pos) /*!< \brief SCTLR: NMFI Mask */ + +#define SCTLR_EE_Pos 25U /*!< \brief SCTLR: EE Position */ +#define SCTLR_EE_Msk (1UL << SCTLR_EE_Pos) /*!< \brief SCTLR: EE Mask */ + +#define SCTLR_VE_Pos 24U /*!< \brief SCTLR: VE Position */ +#define SCTLR_VE_Msk (1UL << SCTLR_VE_Pos) /*!< \brief SCTLR: VE Mask */ + +#define SCTLR_U_Pos 22U /*!< \brief SCTLR: U Position */ +#define SCTLR_U_Msk (1UL << SCTLR_U_Pos) /*!< \brief SCTLR: U Mask */ + +#define SCTLR_FI_Pos 21U /*!< \brief SCTLR: FI Position */ +#define SCTLR_FI_Msk (1UL << SCTLR_FI_Pos) /*!< \brief SCTLR: FI Mask */ + +#define SCTLR_UWXN_Pos 20U /*!< \brief SCTLR: UWXN Position */ +#define SCTLR_UWXN_Msk (1UL << SCTLR_UWXN_Pos) /*!< \brief SCTLR: UWXN Mask */ + +#define SCTLR_WXN_Pos 19U /*!< \brief SCTLR: WXN Position */ +#define SCTLR_WXN_Msk (1UL << SCTLR_WXN_Pos) /*!< \brief SCTLR: WXN Mask */ + +#define SCTLR_HA_Pos 17U /*!< \brief SCTLR: HA Position */ +#define SCTLR_HA_Msk (1UL << SCTLR_HA_Pos) /*!< \brief SCTLR: HA Mask */ + +#define SCTLR_RR_Pos 14U /*!< \brief SCTLR: RR Position */ +#define SCTLR_RR_Msk (1UL << SCTLR_RR_Pos) /*!< \brief SCTLR: RR Mask */ + +#define SCTLR_V_Pos 13U /*!< \brief SCTLR: V Position */ +#define SCTLR_V_Msk (1UL 
<< SCTLR_V_Pos)                   /*!< \brief SCTLR: V Mask */
+
+#define SCTLR_I_Pos              12U                                    /*!< \brief SCTLR: I Position */
+#define SCTLR_I_Msk              (1UL << SCTLR_I_Pos)                   /*!< \brief SCTLR: I Mask */
+
+#define SCTLR_Z_Pos              11U                                    /*!< \brief SCTLR: Z Position */
+#define SCTLR_Z_Msk              (1UL << SCTLR_Z_Pos)                   /*!< \brief SCTLR: Z Mask */
+
+#define SCTLR_SW_Pos             10U                                    /*!< \brief SCTLR: SW Position */
+#define SCTLR_SW_Msk             (1UL << SCTLR_SW_Pos)                  /*!< \brief SCTLR: SW Mask */
+
+#define SCTLR_B_Pos              7U                                     /*!< \brief SCTLR: B Position */
+#define SCTLR_B_Msk              (1UL << SCTLR_B_Pos)                   /*!< \brief SCTLR: B Mask */
+
+#define SCTLR_CP15BEN_Pos        5U                                     /*!< \brief SCTLR: CP15BEN Position */
+#define SCTLR_CP15BEN_Msk        (1UL << SCTLR_CP15BEN_Pos)             /*!< \brief SCTLR: CP15BEN Mask */
+
+#define SCTLR_C_Pos              2U                                     /*!< \brief SCTLR: C Position */
+#define SCTLR_C_Msk              (1UL << SCTLR_C_Pos)                   /*!< \brief SCTLR: C Mask */
+
+#define SCTLR_A_Pos              1U                                     /*!< \brief SCTLR: A Position */
+#define SCTLR_A_Msk              (1UL << SCTLR_A_Pos)                   /*!< \brief SCTLR: A Mask */
+
+#define SCTLR_M_Pos              0U                                     /*!< \brief SCTLR: M Position */
+#define SCTLR_M_Msk              (1UL << SCTLR_M_Pos)                   /*!< \brief SCTLR: M Mask */
+
+
+#if (__VIC_PRESENT == 1U) || defined(DOXYGEN)
+  /** \brief Structure type to access the Vectored Interrupt Controller (PL190) (VIC) */
+  typedef struct
+  {
+    __IM  uint32_t VICIRQSTATUS;      /*!< \brief Offset: 0x000 (R/ ) Provides the status of interrupts [31:0] after IRQ masking */
+    __IM  uint32_t VICFIQSTATUS;      /*!< \brief Offset: 0x004 (R/ ) Provides the status of the interrupts after FIQ masking */
+    __IM  uint32_t VICRAWINTR;        /*!< \brief Offset: 0x008 (R/ ) Provides the status of the source interrupts, and software interrupts */
+    __IOM uint32_t VICINTSELECT;      /*!< \brief Offset: 0x00C (R/W) Selects whether the corresponding interrupt source generates an FIQ or an IRQ */
+    __IOM uint32_t VICINTENABLE;      /*!< \brief Offset: 0x010 (R/W) Enables the interrupt request lines */
+    __OM  uint32_t VICINTENCLEAR;     /*!< \brief Offset: 0x014 ( /W) Clears bits in the VICIntEnable Register */
+    __IOM uint32_t VICSOFTINT;        /*!< \brief Offset: 0x018 (R/W) Generates software interrupts */
+    __OM  uint32_t VICSOFTINTCLEAR;   /*!< \brief Offset: 0x01C ( /W) Clears the corresponding bit in the VICSOFTINT Register */
+    __IOM uint32_t VICPROTECTION;     /*!< \brief Offset: 0x020 (R/W) Enables or disables protected register access */
+    RESERVED(0:4, uint32_t)
+    __IOM uint32_t VICVECTADDR;       /*!< \brief Offset: 0x030 (R/W) Contains the ISR address of the currently active interrupt */
+    __IOM uint32_t VICDEFVECTADDR;    /*!< \brief Offset: 0x034 (R/W) Contains the default ISR address */
+    RESERVED(1:50, uint32_t)
+    __IOM uint32_t VICVECTADDRx[16];  /*!< \brief Offset: 0x100 (R/W) Contain the ISR vector addresses */
+    RESERVED(2:30, uint32_t)
+    __IOM uint32_t VICVECTCNTLx[16];  /*!< \brief Offset: 0x200 (R/W) Select the interrupt source for the vectored interrupt */
+    RESERVED(3:872, uint32_t)
+    __IM  uint32_t VICPERIPHID0;      /*!< \brief Offset: 0xFE0 (R/ ) Peripheral Identification Register 0 */
+    __IM  uint32_t VICPERIPHID1;      /*!< \brief Offset: 0xFE4 (R/ ) Peripheral Identification Register 1 */
+    __IM  uint32_t VICPERIPHID2;      /*!< \brief Offset: 0xFE8 (R/ ) Peripheral Identification Register 2 */
+    __IM  uint32_t VICPERIPHID3;      /*!< \brief Offset: 0xFEC (R/ ) Peripheral Identification Register 3 */
+    __IM  uint32_t VICPCELLID[4];     /*!< \brief Offset: 0xFF0 (R/ ) PrimeCell Identification Registers */
+  } VIC_Type;
+
+  #define VIC ((VIC_Type *) VIC_BASE)   /*!< \brief VIC register set access pointer */
+
+
+  #define VICPROTECTION_Protection_Pos     0U
+  #define VICPROTECTION_Protection_Msk    (1U << VICPROTECTION_Protection_Pos)
+  #define VICPROTECTION_Protection(x)     (((uint32_t)(((uint32_t)(x)) /*<< VICPROTECTION_Protection_Pos*/)) & VICPROTECTION_Protection_Msk)
+
+  #define VICVECTCNTL_IntSource_Pos        0U
+  #define VICVECTCNTL_IntSource_Msk       (0x1FU << VICVECTCNTL_IntSource_Pos)
+  #define VICVECTCNTL_IntSource(x)        (((uint32_t)(((uint32_t)(x)) /*<< VICVECTCNTL_IntSource_Pos*/)) & VICVECTCNTL_IntSource_Msk)
+  #define VICVECTCNTL_E_Pos                5U
+  #define VICVECTCNTL_E_Msk               (1U << VICVECTCNTL_E_Pos)
+  #define VICVECTCNTL_E(x)                (((uint32_t)(((uint32_t)(x)) << VICVECTCNTL_E_Pos)) & VICVECTCNTL_E_Msk)
+
+  #define VICPERIPHID0_Partnumber0_Pos     0U
+  #define VICPERIPHID0_Partnumber0_Mask   (0xFFU << VICPERIPHID0_Partnumber0_Pos)
+
+  #define VICPERIPHID1_Partnumber1_Pos     0U
+  #define VICPERIPHID1_Partnumber1_Mask   (0xFU << VICPERIPHID1_Partnumber1_Pos)
+  #define VICPERIPHID1_Designer0_Pos       4U
+  #define VICPERIPHID1_Designer0_Mask     (0xFU << VICPERIPHID1_Designer0_Pos)
+
+  #define VICPERIPHID2_PaDesigner1_Pos     0U
+  #define VICPERIPHID2_PaDesigner1_Mask   (0xFU << VICPERIPHID2_PaDesigner1_Pos)
+  #define VICPERIPHID2_Revision_Pos        4U
+  #define VICPERIPHID2_Revision_Mask      (0xFU << VICPERIPHID2_Revision_Pos)
+
+  #define VICPERIPHID3_Configuration_Pos   0U
+  #define VICPERIPHID3_Configuration_Mask (0xFFU << VICPERIPHID3_Configuration_Pos)
+
+  #define VICPCELLID_VICPCellID_Pos        0U
+  #define VICPCELLID_VICPCellID_Msk       (0xFFU << VICPCELLID_VICPCellID_Pos)
+#endif /* (__VIC_PRESENT == 1U) || defined(DOXYGEN) */
+
+#if (__GIC_PRESENT == 1U) || defined(DOXYGEN)
+  #include "gicv2.h"
+#endif /* (__GIC_PRESENT == 1U) || defined(DOXYGEN) */
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* __ARM_V7R_DEPENDANT */
+#endif /* __CMSIS_GENERIC */
\ No newline at end of file
diff --git a/CMSIS/Core/Include/r-profile/armv7r_cp15.h b/CMSIS/Core/Include/r-profile/armv7r_cp15.h
new file mode 100644
index 000000000..c325628b3
--- /dev/null
+++ b/CMSIS/Core/Include/r-profile/armv7r_cp15.h
@@ -0,0 +1,27 @@
+/**************************************************************************//**
+ * @file     armv7r_cp15.h
+ * @brief    CMSIS Armv7-R CP15 register access header file
+ * @version  V1.0.2
+ * @date     19. December 2022
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+// The CP15 interface is located in the a-profile folder because it is shared with the Cortex-A cores
+#include "../a-profile/armv7a_cp15.h"
diff --git a/CMSIS/Core/Include/r-profile/armv8r.h b/CMSIS/Core/Include/r-profile/armv8r.h
new file mode 100644
index 000000000..8dca4c650
--- /dev/null
+++ b/CMSIS/Core/Include/r-profile/armv8r.h
@@ -0,0 +1,355 @@
+/**************************************************************************//**
+ * @file     armv8r.h
+ * @brief    CMSIS Cortex-R Core Peripheral Access Layer Header File for ARMv8-R
+ * @version  V1.0.0
+ * @date     2. July 2023
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef __ARM_V8R_GENERIC
+#define __ARM_V8R_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CMSIS definitions
+ ******************************************************************************/
+/**
+  \ingroup ARMv8-R
+  @{
+ */
+
+#include "cmsis_version.h"
+
+/** __FPU_USED indicates whether an FPU is used or not.
+    For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions.
+*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if (defined(__ARM_NEON) && (__ARM_NEON == 1)) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check 
__FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ +#include "armv8r_system_control.h" + +#ifdef __cplusplus +} +#endif + +#endif /* __ARM_V8R_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __ARM_V8R_DEPENDANT +#define __ARM_V8R_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + + /* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ECC_PRESENT + #define __ECC_PRESENT 0U + #warning "__ECC_PRESENT not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< \brief Defines 'read only' permissions */ +#else + #define __I volatile const /*!< \brief Defines 'read only' permissions */ +#endif +#define __O volatile /*!< \brief Defines 'write only' permissions */ +#define __IO volatile /*!< \brief Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */ +#define __OM volatile /*!< \brief Defines 'write only' structure member permissions */ +#define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */ +#define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas + +/** @} end of group ARMv8-R */ + + + + /******************************************************************************* + * Register Abstraction + Core Register contain: + - CPSR + ******************************************************************************/ + +/* Core Register CPSR */ +typedef union +{ + struct + { + uint32_t M:4; /*!< \brief bit: 0.. 3 Mode field */ + RESERVED(0:2, uint32_t) /* bit: 4.. 
5 Reserved */ + uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */ + uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */ + uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */ + uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */ + RESERVED(1:6, uint32_t) /* bit: 10..15 Reserved */ + uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */ + RESERVED(2:1, uint32_t) /* bit: 20 Reserved */ + uint32_t DIT:1; /*!< \brief bit: 21 Data Independent Timing */ + uint32_t PAN:1; /*!< \brief bit: 22 Privileged Access Never */ + uint32_t SSBS:1; /*!< \brief bit: 23 Speculative Store Bypass Safe */ + RESERVED(3:3, uint32_t) /* bit: 24..26 Reserved */ + uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */ + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CPSR_Type; + + + +/* CPSR Register Definitions */ +#define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */ +#define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */ + +#define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */ +#define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */ + +#define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */ +#define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */ + +#define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */ +#define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */ + +#define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */ +#define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */ + +#define CPSR_SSBS_Pos 23U /*!< \brief CPSR: SSBS Position */ +#define CPSR_SSBS_Msk (0x1UL << CPSR_SSBS_Pos) /*!< \brief CPSR: SSBS Mask */ + +#define CPSR_PAN_Pos 22U /*!< \brief CPSR: PAN Position */ +#define CPSR_PAN_Msk (0x1UL << CPSR_PAN_Pos) /*!< \brief CPSR: PAN Mask */ + +#define CPSR_DIT_Pos 21U /*!< \brief CPSR: DIT Position */ +#define CPSR_DIT_Msk (0x1UL << CPSR_DIT_Pos) /*!< \brief CPSR: DIT Mask */ + +#define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */ +#define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */ + +#define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */ +#define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */ + +#define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */ +#define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */ + +#define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */ +#define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */ + +#define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */ +#define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */ + +#define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */ +#define CPSR_M_Msk (0xFUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */ + +#define CPSR_M_USR 0x10U /*!< \brief CPSR: M User mode (PL0) */ +#define CPSR_M_FIQ 0x11U /*!< \brief CPSR: M Fast Interrupt mode (PL1) */ +#define CPSR_M_IRQ 0x12U /*!< \brief CPSR: M Interrupt mode (PL1) */ +#define CPSR_M_SVC 0x13U /*!< \brief CPSR: M Supervisor mode (PL1) */ +#define CPSR_M_MON 0x16U /*!< \brief CPSR: M Monitor mode (PL1) */ +#define CPSR_M_ABT 0x17U /*!< \brief CPSR: M Abort mode (PL1) */ +#define CPSR_M_HYP 0x1AU /*!< \brief CPSR: M Hypervisor mode (PL2) */ +#define CPSR_M_UND 0x1BU /*!< \brief CPSR: M Undefined mode (PL1) */ +#define CPSR_M_SYS 
0x1FU /*!< \brief CPSR: M System mode (PL1) */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __ARM_V8R_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
\ No newline at end of file
diff --git a/CMSIS/Core/Include/r-profile/armv8r_system_control.h b/CMSIS/Core/Include/r-profile/armv8r_system_control.h
new file mode 100644
index 000000000..2807ef7bc
--- /dev/null
+++ b/CMSIS/Core/Include/r-profile/armv8r_system_control.h
@@ -0,0 +1,27 @@
+/**************************************************************************//**
+ * @file     armv8r_system_control.h
+ * @brief    CMSIS Armv8-R system control header file
+ * @version  V6.0.0
+ * @date     4. August 2023
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+// The system control interface is located in the a-profile folder because it is shared with the Cortex-A cores
+#include "../a-profile/armv8a_system_control.h"
\ No newline at end of file
diff --git a/CMSIS/Core/Include/r-profile/cmsis_armclang_r.h b/CMSIS/Core/Include/r-profile/cmsis_armclang_r.h
new file mode 100644
index 000000000..99dbadcb6
--- /dev/null
+++ b/CMSIS/Core/Include/r-profile/cmsis_armclang_r.h
@@ -0,0 +1,161 @@
+/**************************************************************************//**
+ * @file     cmsis_armclang_r.h
+ * @brief    CMSIS compiler armclang (Arm Compiler 6) header file
+ * @version  V5.0.0
+ * @date     04. December 2022
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef __CMSIS_ARMCLANG_CORER_H +#define __CMSIS_ARMCLANG_CORER_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __CMSIS_ARMCLANG_H + #error "This file must not be included directly" +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP(void) +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) +{ + uint32_t cpsr; + uint32_t result; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV %1, sp \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr), "=r"(result) : : "memory" + ); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV sp, %1 \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory" + ); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/** @} end of CMSIS_Core_RegAccFunctions */ + + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM 
volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + +#endif /* __CMSIS_ARMCLANG_CORER_H */ diff --git a/CMSIS/Core/Include/r-profile/cmsis_clang_r.h b/CMSIS/Core/Include/r-profile/cmsis_clang_r.h new file mode 100644 index 000000000..b3a8854c9 --- /dev/null +++ b/CMSIS/Core/Include/r-profile/cmsis_clang_r.h @@ -0,0 +1,160 @@ +/**************************************************************************//** + * @file cmsis_armclang_r.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V5.0.0 + * @date 04. December 2022 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_ARMCLANG_CORER_H +#define __CMSIS_ARMCLANG_CORER_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __CMSIS_CLANG_H + #error "This file must not be included directly" +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP(void) +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) +{ + uint32_t cpsr; + uint32_t result; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV %1, sp \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr), "=r"(result) : : "memory" + ); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr; + __ASM volatile( + "MRS %0, cpsr \n" + 
"CPS #0x1F \n" // no effect in USR mode + "MOV sp, %1 \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory" + ); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/** @} end of CMSIS_Core_RegAccFunctions */ + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + +#endif /* __CMSIS_ARMCLANG_CORER_H */ diff --git a/CMSIS/Core/Include/r-profile/cmsis_gcc_r.h b/CMSIS/Core/Include/r-profile/cmsis_gcc_r.h new file mode 100644 index 000000000..71a2c5195 --- /dev/null +++ b/CMSIS/Core/Include/r-profile/cmsis_gcc_r.h @@ -0,0 +1,162 @@ +/**************************************************************************//** + * @file cmsis_gcc_r.h + * @brief CMSIS compiler GCC header file + * @version V6.0.0 + * @date 4. August 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_GCC_CORER_H +#define __CMSIS_GCC_CORER_H + +#ifndef __CMSIS_GCC_H + #error "This file must not be included directly" +#endif + +/* ignore some GCC warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-conversion" +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wunused-parameter" + + +/** \defgroup CMSIS_Core_intrinsics CMSIS Core Intrinsics + Access to dedicated SIMD instructions + @{ +*/ +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP(void) +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) +{ + uint32_t cpsr = __get_CPSR(); + uint32_t result; + __ASM volatile( + "CPS #0x1F \n" + "MOV %0, sp " : "=r"(result) : : "memory" + ); + __set_CPSR(cpsr); + __ISB(); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr = __get_CPSR(); + __ASM volatile( + "CPS #0x1F \n" + "MOV sp, %0 " : : "r" (topOfProcStack) : "memory" + ); + __set_CPSR(cpsr); + __ISB(); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + +/*@} end of group CMSIS_Core_intrinsics */ + +#pragma GCC diagnostic pop + 
+#endif /* __CMSIS_GCC_CORER_H */ diff --git a/CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h b/CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h new file mode 100644 index 000000000..0379428e7 --- /dev/null +++ b/CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h @@ -0,0 +1,39 @@ +/**************************************************************************//** + * @file cmsis_iccarm_r.h + * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file + * @version V5.0.0 + * @date 04. December 2022 + ******************************************************************************/ + +//------------------------------------------------------------------------------ +// +// Copyright (c) 2018-2023 Arm Limited. All rights reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//------------------------------------------------------------------------------ + + +#ifndef __CMSIS_ICCARM_CORER_H__ +#define __CMSIS_ICCARM_CORER_H__ + +#ifndef __CMSIS_ICCARM_H__ + #error "This file must not be included directly" +#endif + +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM +#endif + +#endif /* __CMSIS_ICCARM_CORER_H__ */ diff --git a/CMSIS/Core/Include/r-profile/gicv2.h b/CMSIS/Core/Include/r-profile/gicv2.h new file mode 100644 index 000000000..39d9cebd3 --- /dev/null +++ b/CMSIS/Core/Include/r-profile/gicv2.h @@ -0,0 +1,26 @@ +/****************************************************************************** + * @file gic_v20.h + * @brief CMSIS GIC 2.0 API for Armv7-A MPU and Armv7-R MCU + * @version V6.0.0 + * @date 8. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2017-2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// The gic-interface is located in the profile folder for cortex-a devices +#include "../a-profile/gicv2.h" diff --git a/CMSIS/CoreValidation/Layer/Target/CA5/RTE/Device/ARMCA5/mmu_ARMCA5.c b/CMSIS/CoreValidation/Layer/Target/CA5/RTE/Device/ARMCA5/mmu_ARMCA5.c index 58a94805d..b6b027269 100644 --- a/CMSIS/CoreValidation/Layer/Target/CA5/RTE/Device/ARMCA5/mmu_ARMCA5.c +++ b/CMSIS/CoreValidation/Layer/Target/CA5/RTE/Device/ARMCA5/mmu_ARMCA5.c @@ -145,7 +145,7 @@ void MMU_CreateTranslationTable(void) MMU_TTSection (TTB_BASE, 0, 4096, DESCRIPTOR_FAULT); /* - * Generate descriptors. Refer to core_ca.h to get information about attributes + * Generate descriptors. 
Refer to armv7a.h to get information about attributes * */ //Create descriptors for Vectors, RO, RW, ZI sections diff --git a/CMSIS/CoreValidation/Layer/Target/CA7/RTE/Device/ARMCA7/mmu_ARMCA7.c b/CMSIS/CoreValidation/Layer/Target/CA7/RTE/Device/ARMCA7/mmu_ARMCA7.c index 26431f3b4..758e254f5 100644 --- a/CMSIS/CoreValidation/Layer/Target/CA7/RTE/Device/ARMCA7/mmu_ARMCA7.c +++ b/CMSIS/CoreValidation/Layer/Target/CA7/RTE/Device/ARMCA7/mmu_ARMCA7.c @@ -145,7 +145,7 @@ void MMU_CreateTranslationTable(void) MMU_TTSection (TTB_BASE, 0, 4096, DESCRIPTOR_FAULT); /* - * Generate descriptors. Refer to core_ca.h to get information about attributes + * Generate descriptors. Refer to armv7a.h to get information about attributes * */ //Create descriptors for Vectors, RO, RW, ZI sections diff --git a/CMSIS/CoreValidation/Layer/Target/CA9/RTE/Device/ARMCA9/mmu_ARMCA9.c b/CMSIS/CoreValidation/Layer/Target/CA9/RTE/Device/ARMCA9/mmu_ARMCA9.c index 1435eb92b..5447e2e81 100644 --- a/CMSIS/CoreValidation/Layer/Target/CA9/RTE/Device/ARMCA9/mmu_ARMCA9.c +++ b/CMSIS/CoreValidation/Layer/Target/CA9/RTE/Device/ARMCA9/mmu_ARMCA9.c @@ -145,7 +145,7 @@ void MMU_CreateTranslationTable(void) MMU_TTSection (TTB_BASE, 0, 4096, DESCRIPTOR_FAULT); /* - * Generate descriptors. Refer to core_ca.h to get information about attributes + * Generate descriptors. Refer to armv7a.h to get information about attributes * */ //Create descriptors for Vectors, RO, RW, ZI sections diff --git a/CMSIS/DoxyGen/Core/Core.dxy.in b/CMSIS/DoxyGen/Core/Core.dxy.in index e330fba08..acb25c9da 100644 --- a/CMSIS/DoxyGen/Core/Core.dxy.in +++ b/CMSIS/DoxyGen/Core/Core.dxy.in @@ -47,7 +47,7 @@ PROJECT_NAME = "CMSIS-Core (Cortex-M)" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = "Version 5.7.0" +PROJECT_NUMBER = "Version 6.0.0" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/CMSIS/DoxyGen/Core_A/Core_A.dxy.in b/CMSIS/DoxyGen/Core_A/Core_A.dxy.in index 438e56146..5eb35e0f8 100644 --- a/CMSIS/DoxyGen/Core_A/Core_A.dxy.in +++ b/CMSIS/DoxyGen/Core_A/Core_A.dxy.in @@ -47,7 +47,7 @@ PROJECT_NAME = "CMSIS-Core (Cortex-A)" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = "Version 1.2.1" +PROJECT_NUMBER = "Version 6.0.0" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a @@ -920,18 +920,22 @@ INPUT = src/Overview.txt \ src/Using.txt \ src/Template.txt \ src/MISRA.txt \ - ../../Core_A/Include/core_ca.h \ - ../../Core_A/Include/cmsis_armcc.h \ - ../../Core_A/Include/cmsis_cp15.h \ - ../../Core_A/Source/irq_ctrl_gic.c \ - ../../Core_A/Include/irq_ctrl.h \ + ../../Core/Include/cmsis_version.h \ + ../../Core/Include/a-profile/armv7a.h \ + ../../Core/Include/a-profile/armv7a_cp15.h \ + ../../Core/Include/a-profile/armv8a.h \ + ../../Core/Include/cmsis_clang.h \ + ../../Core/Include/a-profile/cmsis_clang_a.h \ + ../../Core/Include/a-profile/gicv2.h \ + ../../Core/Source/irq_ctrl_gic.c \ + ../../Core/Include/a-profile/irq_ctrl.h \ src/Ref_SystemAndClock.txt \ src/ref_gic.txt \ src/ref_core_register.txt \ src/ref_cache.txt \ src/ref_timer.txt \ src/ref_mmu.txt \ - src/core_ca.txt \ + src/arm7a.txt \ src/cmsis_armcc.txt \ src/irq_ctrl.txt \ diff --git a/CMSIS/DoxyGen/Core_A/src/Overview.txt b/CMSIS/DoxyGen/Core_A/src/Overview.txt index 7719a4dce..32f113a74 100644 --- a/CMSIS/DoxyGen/Core_A/src/Overview.txt +++ b/CMSIS/DoxyGen/Core_A/src/Overview.txt @@ -26,7 +26,7 @@ Files relevant to CMSIS-Core (Cortex-A) are present in the following ARM::CMS |File/Folder |Content | |--------------------------------|------------------------------------------------------------------------| |\b CMSIS\\Documentation\\Core_A | This documentation | -|\b CMSIS\\Core_A\\Include | CMSIS-Core (Cortex-A) header files (for example core_ca.h, etc.) | +|\b CMSIS\\Core\\Include | CMSIS-Core (Cortex-A) header files (for example core_ca5.h, etc.) | |\b Device | \ref using_ARM_pg "Arm reference implementations" of Cortex-A devices | |\b Device\\\_Template_Vendor | \ref templates_pg for extension by silicon vendors | diff --git a/CMSIS/DoxyGen/Core_A/src/Template.txt b/CMSIS/DoxyGen/Core_A/src/Template.txt index 7c784cfe9..c4669ccc6 100644 --- a/CMSIS/DoxyGen/Core_A/src/Template.txt +++ b/CMSIS/DoxyGen/Core_A/src/Template.txt @@ -6,6 +6,7 @@ Arm supplies CMSIS-Core device template files for the all supported Cortex-A processors and various compiler vendors. Refer to the list of \ref tested_tools_sec for compliance. + These CMSIS-Core device template files include the following: - Register names of the Core Peripherals and names of the Core Exception Vectors. - Functions to access core peripherals, cache, MMU and special CPU instructions @@ -15,26 +16,32 @@ The detailed file structure of the CMSIS-Core device templates is shown in the f -\section CMSIS_Processor_files CMSIS-Core Processor Files +\section CMSIS_Processor_files CMSIS-Core Processor Files -The CMSIS-Core processor files provided by Arm are in the directory .\\CMSIS\\Core_A\\Include. These header files define all processor specific attributes do not need any modifications. +The CMSIS-Core processor files provided by Arm are in the directory .\\CMSIS\\Core\\Include. These header files define all processor specific attributes do not need any modifications. The core_<cpu>.h defines the core peripherals and provides helper functions that access the core registers. 
One file is available for each supported Cortex-A processor: Header File | Processor :----------------|:------------------------------ -core_ca.h | generics for all supported Cortex-A processors - +core_ca5.h | for the Cortex-A5 processor +core_ca7.h | for the Cortex-A7 processor +core_ca9.h | for the Cortex-A9 processor +core_ca35.h | for the Cortex-A35 processor +core_ca53.h | for the Cortex-A53 processor +core_ca57.h | for the Cortex-A57 processor \section device_examples Device Examples The CMSIS Software Pack defines several devices that are based on the various processors. The device related CMSIS-Core files are in the directory .\\Device\\ARM and include CMSIS-Core processor file explained before. The following sample devices are defined in the CMSIS-Pack description file ARM.CMSIS.pdsc: -Family | Device | Description -:------------------|:------------------|:--------------------------------- -ARM Cortex-A5 | ARMCA5 | Cortex-A5 based device -ARM Cortex-A7 | ARMCA7 | Cortex-A7 based device -ARM Cortex-A9 | ARMCA9 | Cortex-A9 based device - +Family | Device | Description +:-----------------|:------------------|:--------------------------------- +ARM Cortex-A5 | ARMCA5 | Cortex-A5 based device +ARM Cortex-A7 | ARMCA7 | Cortex-A7 based device +ARM Cortex-A9 | ARMCA9 | Cortex-A9 based device +ARM Cortex-A35 | ARMCA35 | Cortex-A35 based device +ARM Cortex-A53 | ARMCA53 | Cortex-A53 based device +ARM Cortex-A57 | ARMCA57 | Cortex-A57 based device \section template_files_sec Template Files @@ -53,7 +60,7 @@ Silicon vendors add to these template files the following information: .\\Device\\\_Template_Vendor\\Vendor\\Device_A\\Source\\ARM\\startup_Device.c Startup file template for Arm C/C++ Compiler. - + .\\Device\\\_Template_Vendor\\Vendor\\Device_A\\Source\\ARM\\Device.sct Linker scatter file template for Arm C/C++ Compiler. @@ -129,9 +136,10 @@ The device configuration of the template files is described in detail on the fol \page startup_c_pg Startup File startup_.c The \ref startup_c_pg contains: - - Exception vectors of the Cortex-A Processor with weak functions that implement default routines. - The reset handler which is executed after CPU reset and typically calls the \ref SystemInit function. - The setup values for the various stack pointers, i.e. per exceptional mode and main stack. + - Exception vectors of the Cortex-A Processor with weak functions that implement default routines. + - Interrupt vectors that are device specific with weak functions that implement default routines. The file exists for each supported toolchain and is the only tool-chain specific CMSIS file. @@ -238,7 +246,7 @@ If these \#defines are missing default values are used. Description - __CM0_REV + __CA5_REV 0x0000 0x0000 Core revision number ([15:8] revision number, [7:0] patch number) @@ -286,9 +294,9 @@ The following code exemplifies the configuration of the Cortex-A9 Processor and #define __GIC_PRESENT 1U /*!< GIC present */ #define __TIM_PRESENT 0U /*!< TIM not present */ #define __L2C_PRESENT 0U /*!< L2C not present */ -: -: -#include "core_ca.h" /* Cortex-A processor and core peripherals */ +. +. 
+#include "core_ca9.h" /* Cortex-A processor and core peripherals */ \endcode diff --git a/CMSIS/DoxyGen/Core_A/src/core_ca.txt b/CMSIS/DoxyGen/Core_A/src/arm7a.txt similarity index 99% rename from CMSIS/DoxyGen/Core_A/src/core_ca.txt rename to CMSIS/DoxyGen/Core_A/src/arm7a.txt index c4cbf51d4..95369cb8f 100644 --- a/CMSIS/DoxyGen/Core_A/src/core_ca.txt +++ b/CMSIS/DoxyGen/Core_A/src/arm7a.txt @@ -1,5 +1,5 @@ /**************************************************************************//** - * @file core_ca.txt + * @file arm7a.txt * @brief CMSIS Cortex-A Core Peripheral Access Layer Header File ******************************************************************************/ @@ -79,7 +79,7 @@ The registers in the various UARTs can now be referred in the user code as shown \section core_cmsis_pal_min_reqs Minimal Requirements \details - To access the peripheral registers and related function in a device, the files device.h and core_ca.h define as a minimum: + To access the peripheral registers and related function in a device, the files device.h and armv7a.h define as a minimum: \n\n - The Register Layout Typedef for each peripheral that defines all register names. RESERVED is used to introduce space into the structure for adjusting the addresses of diff --git a/README.md b/README.md index 24f4cec11..973b114c8 100644 --- a/README.md +++ b/README.md @@ -31,8 +31,7 @@ For a list of all CMSIS components refer to [**Introduction - CMSIS Components** Directory | Content :------------------- |:--------------------------------------------------------- -CMSIS/Core | CMSIS-Core(M) related files (for release) -CMSIS/Core_A | CMSIS-Core(A) related files (for release) +CMSIS/Core | CMSIS-Core(A/R/M) related files (for release) CMSIS/CoreValidation | Validation for Core(M) and Core(A) (NOT part of release) CMSIS/Driver | CMSIS-Driver API headers and template files CMSIS/RTOS2 | RTOS v2 related files (for Cortex-M & Armv8-M)