RTEMS CPU Kit with SuperCore  4.11.2
cpu.h
/**
 * @file
 *
 * @brief ARM Architecture Support API
 */

/*
 * This include file contains information pertaining to the ARM
 * processor.
 *
 * Copyright (c) 2009-2015 embedded brains GmbH.
 *
 * Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 * Copyright (c) 2006 OAR Corporation
 *
 * Copyright (c) 2002 Advent Networks, Inc.
 *   Jay Monkman <jmonkman@adventnetworks.com>
 *
 * COPYRIGHT (c) 2000 Canon Research Centre France SA.
 * Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 *
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/arm.h>

#if defined(ARM_MULTILIB_ARCH_V4)

#if defined(__thumb__) && !defined(__thumb2__)
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif

#define ARM_PSR_N (1 << 31)
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)
#define ARM_PSR_Q (1 << 27)
#define ARM_PSR_J (1 << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_HYP 0x1a
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

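/*
 * Illustrative sketch, kept compiled out: one way the PSR mode and flag
 * constants above could be used.  The helper names below are hypothetical
 * and not part of this API.
 */
#if 0
static inline bool arm_psr_is_mode( uint32_t psr, uint32_t mode )
{
  /* Compare the mode field (bits 4..0) against one of the ARM_PSR_M_* values */
  return ( psr & ARM_PSR_M_MASK ) == mode;
}

static inline bool arm_psr_irq_masked( uint32_t psr )
{
  /* A set I bit means normal interrupts (IRQ) are masked */
  return ( psr & ARM_PSR_I ) != 0;
}
#endif /* illustrative example */
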
#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

/*
 * The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned ( 64 )))
#else
  #define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned ( 32 )))
#endif

#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC TRUE

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ).  If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here.  Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there is a CLZ instruction which could be
 * used to implement this much more quickly than the default macros.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
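
/*
 * Illustrative sketch, kept compiled out: how the CLZ instruction mentioned
 * above could back a fast first-bit search, here via the GCC builtin
 * __builtin_clz() rather than inline assembly.  The helper name and the
 * exact bit-numbering convention expected by the generic bitfield macros
 * are assumptions, not part of this API.
 */
#if 0
static inline unsigned int arm_find_first_bit_clz( uint32_t value )
{
  /* Counts leading zeros, so bit 0 here is the most significant bit; the
     result for value == 0 is undefined and must be handled by the caller. */
  return (unsigned int) __builtin_clz( value );
}
#endif /* illustrative example */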

#define CPU_PER_CPU_CONTROL_SIZE 0

#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

#ifdef ARM_MULTILIB_VFP
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef RTEMS_SMP
  #ifdef ARM_MULTILIB_VFP
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #endif
#endif

#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

#define ARM_VFP_CONTEXT_SIZE 264

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
} Context_Control;

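/*
 * Illustrative sketch, kept compiled out: the ARM_CONTEXT_CONTROL_*_OFFSET
 * constants defined earlier are intended to match the Context_Control layout
 * above.  Assuming RTEMS_STATIC_ASSERT() and offsetof() (via <stddef.h>) are
 * available, such an expectation could be checked like this:
 */
#if 0
  #ifdef ARM_MULTILIB_VFP
    RTEMS_STATIC_ASSERT(
      offsetof( Context_Control, register_d8 ) == ARM_CONTEXT_CONTROL_D8_OFFSET,
      ARM_CONTEXT_CONTROL_D8_OFFSET
    );
  #endif
#endif /* illustrative example */
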
typedef struct {
  /* Not supported */
} Context_Control_fp;

extern uint32_t arm_cpu_mode;

static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

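/*
 * Illustrative sketch, kept compiled out: a typical use of the data memory
 * barrier above.  A writer publishes a payload and then a ready flag; the
 * DMB orders the two stores as seen by other observers.  The function name
 * is hypothetical and not part of this API.
 */
#if 0
static inline void example_publish(
  volatile uint32_t *payload,
  volatile uint32_t *ready,
  uint32_t value
)
{
  *payload = value;
  _ARM_Data_memory_barrier(); /* make the payload visible before the flag */
  *ready = 1;
}
#endif /* illustrative example */
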
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  level = 0;
#endif

  return level;
}

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie ) \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

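/*
 * Illustrative sketch, kept compiled out: the usual disable/flash/enable
 * critical-section pattern built on the macros above.  The function name is
 * hypothetical and not part of this API.
 */
#if 0
static void example_critical_section( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );  /* mask IRQ and remember the previous state */
  /* ... first part of the critical section ... */
  _CPU_ISR_Flash( level );    /* briefly restore the saved state, then mask again */
  /* ... second part of the critical section ... */
  _CPU_ISR_Enable( level );   /* restore the state saved by _CPU_ISR_Disable() */
}
#endif /* illustrative example */
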
void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

#define _CPU_Context_Restart_self( _the_context ) \
  _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Fp_start( _base, _offset ) \
  ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

#define _CPU_Fatal_halt( _source, _err ) \
  do { \
    uint32_t _level; \
    uint32_t _error = _err; \
    _CPU_ISR_Disable( _level ); \
    (void) _level; \
    __asm__ volatile ("mov r0, %0\n" \
      : "=r" (_error) \
      : "0" (_error) \
      : "r0" ); \
    while (1); \
  } while (0);

void _CPU_Initialize( void );

void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *heir )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif


static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8) & 0xff;
  byte1 = value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                    "BIC %1, %1, #0xff0000\n"
                    "MOV %0, %0, ROR #8\n"
                    "EOR %0, %0, %1, LSR #8\n"
                    : "=r" (value), "=r" (tmp)
                    : "0" (value), "1" (tmp));
  return value;
#endif
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}

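/*
 * Illustrative sketch, kept compiled out: using CPU_swap_u32() to read a
 * big-endian device register on a little-endian configuration.  The function
 * name is hypothetical and not part of this API.
 */
#if 0
static inline uint32_t example_read_be32( const volatile uint32_t *reg )
{
#if CPU_LITTLE_ENDIAN == TRUE
  return CPU_swap_u32( *reg ); /* byte swap needed on little-endian targets */
#else
  return *reg;                 /* already in the right byte order */
#endif
}
#endif /* illustrative example */
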
typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
);

#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif

#if defined(ARM_MULTILIB_ARCH_V4)

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;

typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  const ARM_VFP_context *vfp_context;
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;

typedef CPU_Exception_frame CPU_Interrupt_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _ARM_Exception_default( CPU_Exception_frame *frame );

/*
 * FIXME: In case your BSP uses this function, then convert it to use
 * the shared start.S file for ARM.
 */
void rtems_exception_init_mngt( void );

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */