RTEMS 5.2
cmsis_gcc.h
/**************************************************************************/
/* Copyright (c) 2009 - 2015 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

/* ignore some GCC warnings */
#if defined ( __GNUC__ )
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif


/* ########################### Core Function Access ########################### */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, control" : "=r" (result) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
{
  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, psp\n" : "=r" (result) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
  __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, msp\n" : "=r" (result) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
  __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask" : "=r" (result) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
}

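/* Editor's note: an illustrative usage sketch, not part of the original CMSIS
 * header. It shows the common save/disable/restore critical-section pattern
 * built from the PRIMASK accessors above; the protected counter and function
 * names are hypothetical.
 */
#if 0   /* example only, not compiled as part of this header */
static volatile uint32_t example_counter;

static inline void example_increment_atomically(void)
{
  uint32_t primask = __get_PRIMASK();   /* remember current masking state   */
  __disable_irq();                      /* PRIMASK = 1: mask all interrupts */
  example_counter++;                    /* critical section                 */
  __set_PRIMASK(primask);               /* restore previous masking state   */
}
#endif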

#if (__CORTEX_M >= 0x03U)

__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
{
  __ASM volatile ("cpsie f" : : : "memory");
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
{
  __ASM volatile ("cpsid f" : : : "memory");
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
{
  __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
{
  __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
}


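/* Editor's note: illustrative sketch, not part of the original header. BASEPRI
 * masks every interrupt whose priority value is numerically greater than or
 * equal to BASEPRI (0 disables the masking). __set_BASEPRI_MAX only ever
 * tightens the mask, so it nests safely; the caller restores the saved value.
 * The priority argument is assumed to be already shifted into the implemented
 * bits of the 8-bit priority field (device dependent).
 */
#if 0   /* example only */
static inline uint32_t example_enter_prio_critical(uint32_t encoded_priority)
{
  uint32_t previous = __get_BASEPRI();
  __set_BASEPRI_MAX(encoded_priority);   /* raise masking boundary (never lowers it) */
  return previous;
}

static inline void example_leave_prio_critical(uint32_t previous)
{
  __set_BASEPRI(previous);               /* restore the saved masking boundary */
}
#endif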
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}

#endif /* (__CORTEX_M >= 0x03U) */


#if (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U)

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
  uint32_t result;

  /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("");
  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  __ASM volatile ("");
  return(result);
#else
  return(0);
#endif
}


__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
  /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("");
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
  __ASM volatile ("");
#endif
}

#endif /* (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) */

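/* Editor's note: illustrative sketch, not part of the original header, and only
 * meaningful on cores where the FPSCR accessors above are available. It clears
 * the cumulative floating-point exception flags; the 0x9F mask (IOC, DZC, OFC,
 * UFC, IXC, IDC) assumes the ARMv7-M FPSCR bit layout.
 */
#if 0   /* example only */
static inline void example_clear_fp_exception_flags(void)
{
  uint32_t fpscr = __get_FPSCR();
  __set_FPSCR(fpscr & ~0x0000009FU);   /* clear sticky exception bits [7] and [4:0] */
}
#endif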


/* ########################## Core Instruction Access ######################### */
/* Define macros for porting to both thumb1 and thumb2.
 * For thumb1, use low register (r0-r7), specified by constraint "l"
 * Otherwise, use general registers, specified by constraint "r" */
#if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r)
#else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif

__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
{
  __ASM volatile ("nop");
}


__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
{
  __ASM volatile ("wfi");
}


__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
{
  __ASM volatile ("wfe");
}


__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
{
  __ASM volatile ("sev");
}


__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}


__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}


__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}


__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}


__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (short)__builtin_bswap16(value);
#else
  int32_t result;

  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}


__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  return (op1 >> op2) | (op1 << (32U - op2));
}

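/* Editor's note: illustrative sketch, not part of the original header. It shows
 * the byte-reversal and rotate helpers above being used to convert a value to
 * the opposite endianness and to rotate a bit mask.
 */
#if 0   /* example only */
static inline uint32_t example_to_big_endian32(uint32_t host_value)
{
  return __REV(host_value);            /* 0x12345678 -> 0x78563412 */
}

static inline uint32_t example_rotated_mask(void)
{
  return __ROR(0x000000FFU, 8U);       /* -> 0xFF000000 */
}
#endif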

#define __BKPT(value) __ASM volatile ("bkpt "#value)


__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
  __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return(result);
}


#define __CLZ __builtin_clz

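/* Editor's note: illustrative sketch, not part of the original header. Combining
 * __RBIT and __CLZ gives a count-trailing-zeros operation, a common way to find
 * the index of the lowest set bit (for example in a pending-flags word).
 */
#if 0   /* example only */
static inline uint32_t example_lowest_set_bit(uint32_t flags)
{
  /* Undefined for flags == 0 (__CLZ of 0 is not meaningful); callers check first. */
  return (uint32_t)__CLZ(__RBIT(flags));
}
#endif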
#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)

__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
     accepted by the assembler, so the following less efficient pattern
     has to be used.
  */
  __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result);    /* Add explicit type cast here */
}


__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
     accepted by the assembler, so the following less efficient pattern
     has to be used.
  */
  __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result);    /* Add explicit type cast here */
}


__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}


__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
  return(result);
}


__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}

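/* Editor's note: illustrative sketch, not part of the original header. The
 * LDREX/STREX pair above is typically used in a retry loop to build lock-free
 * read-modify-write operations; __STREXW returns 0 on success and 1 if the
 * exclusive monitor was lost and the update must be retried.
 */
#if 0   /* example only */
static inline uint32_t example_atomic_add(volatile uint32_t *target, uint32_t amount)
{
  uint32_t newval;

  do {
    newval = __LDREXW(target) + amount;     /* load-exclusive and compute       */
  } while (__STREXW(newval, target) != 0U); /* retry until store-exclusive succeeds */

  return newval;
}
#endif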

#define __SSAT(ARG1,ARG2) \
({ \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })


#define __USAT(ARG1,ARG2) \
({ \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

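/* Editor's note: illustrative sketch, not part of the original header. __SSAT
 * and __USAT saturate a value to a signed or unsigned bit width given as an
 * immediate; here a 32-bit intermediate is clamped to the signed 16-bit range,
 * a common step when producing Q15/PCM samples.
 */
#if 0   /* example only */
static inline int16_t example_clamp_to_q15(int32_t accumulator)
{
  /* Saturate to 16 bits: results outside [-32768, 32767] are clamped. */
  return (int16_t)__SSAT(accumulator, 16);
}
#endif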

__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
     accepted by the assembler, so the following less efficient pattern
     has to be used.
  */
  __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result);    /* Add explicit type cast here */
}


__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
     accepted by the assembler, so the following less efficient pattern
     has to be used.
  */
  __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result);    /* Add explicit type cast here */
}


__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}


__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
{
  __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}


__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
{
  __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}


__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
{
  __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
}

#endif /* (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) */
/* end of group CMSIS_Core_InstructionInterface */


/* ################### Compiler specific Intrinsics ########################### */
#if (__CORTEX_M >= 0x04U)  /* only for Cortex-M4 and above */

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

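/* Editor's note: illustrative sketch, not part of the original header. The SIMD
 * intrinsics in this section operate on four 8-bit (or two 16-bit) lanes packed
 * into one 32-bit word; here __SADD8 adds four signed byte lanes in a single
 * instruction, with each lane wrapping independently.
 */
#if 0   /* example only */
static inline uint32_t example_add_four_bytes(uint32_t packed_a, uint32_t packed_b)
{
  /* e.g. 0x01020304 + 0x10203040 -> 0x11223344 (per lane, modulo 256) */
  return __SADD8(packed_a, packed_b);
}
#endif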
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SSAT16(ARG1,ARG2) \
({ \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({ \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

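/* Editor's note: illustrative sketch, not part of the original header. __UXTB16
 * zero-extends bytes 0 and 2 of a word into two 16-bit lanes, a convenient way
 * to widen packed 8-bit data before using the 16-bit SIMD intrinsics.
 */
#if 0   /* example only */
static inline uint32_t example_widen_even_bytes(uint32_t packed_bytes)
{
  /* 0xAABBCCDD -> 0x00BB00DD (bytes 2 and 0 zero-extended to halfwords) */
  return __UXTB16(packed_bytes);
}
#endif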
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

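/* Editor's note: illustrative sketch, not part of the original header. __SMLAD
 * multiplies the two signed 16-bit lanes of its first operand by the matching
 * lanes of its second and adds both products to an accumulator, which is the
 * core step of a fixed-point dot product or FIR filter inner loop.
 */
#if 0   /* example only */
static inline uint32_t example_dot_product_step(uint32_t packed_x,
                                                uint32_t packed_h,
                                                uint32_t acc)
{
  /* acc += x[0]*h[0] + x[1]*h[1], all operands packed two Q15 values per word. */
  return __SMLAD(packed_x, packed_h, acc);
}
#endif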
__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QADD( int32_t op1, int32_t op2)
{
  int32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QSUB( int32_t op1, int32_t op2)
{
  int32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

#define __PKHBT(ARG1,ARG2,ARG3) \
({ \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({ \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

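/* Editor's note: illustrative sketch, not part of the original header. __PKHBT
 * combines the bottom halfword of its first argument with the (optionally
 * shifted) second argument in the top halfword, which is handy for packing two
 * 16-bit samples into one word for the 16-bit SIMD intrinsics above.
 */
#if 0   /* example only */
static inline uint32_t example_pack_halfwords(uint32_t low16, uint32_t high16)
{
  /* Result: high16[15:0] in bits [31:16], low16[15:0] in bits [15:0]. */
  return __PKHBT(low16, high16, 16);
}
#endif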
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#endif /* (__CORTEX_M >= 0x04) */

#if defined ( __GNUC__ )
#pragma GCC diagnostic pop
#endif

#endif /* __CMSIS_GCC_H */