RTEMS CPU Kit with SuperCore  4.11.2
cpu.h
Go to the documentation of this file.
1 
10 /*
11  * COPYRIGHT (c) 1989-2011.
12  * On-Line Applications Research Corporation (OAR).
13  *
14  * The license and distribution terms for this file may be
15  * found in the file LICENSE in this distribution or at
16  * http://www.rtems.org/license/LICENSE.
17  */
18 
19 #ifndef _RTEMS_SCORE_CPU_H
20 #define _RTEMS_SCORE_CPU_H
21 
22 #ifndef ASM
23 #include <string.h> /* for memcpy */
24 #endif
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
30 #include <rtems/score/types.h>
31 #include <rtems/score/i386.h>
32 
33 #ifndef ASM
34 #include <rtems/score/interrupts.h> /* formerly in libcpu/cpu.h> */
35 #include <rtems/score/registers.h> /* formerly part of libcpu */
36 #endif
37 
/* conditional compilation parameters */

#define CPU_INLINE_ENABLE_DISPATCH TRUE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  PowerPC Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does the RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */
#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */
#ifdef __SSE__
#define CPU_HARDWARE_FP  TRUE
#define CPU_SOFTWARE_FP  FALSE

#define CPU_ALL_TASKS_ARE_FP       TRUE
#define CPU_IDLE_TASK_IS_FP        TRUE
#define CPU_USE_DEFERRED_FP_SWITCH FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP TRUE  /* i387 for i386 */
#else
#define CPU_HARDWARE_FP FALSE
#endif
#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE
#define CPU_IDLE_TASK_IS_FP  FALSE
#if defined(RTEMS_SMP)
  #define CPU_USE_DEFERRED_FP_SWITCH FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH TRUE
#endif
#endif /* __SSE__ */

#define CPU_STACK_GROWS_UP FALSE
#define CPU_STRUCTURE_ALIGNMENT

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */
#define CPU_PROVIDES_IDLE_THREAD_BODY TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */
#define CPU_BIG_ENDIAN    FALSE
#define CPU_LITTLE_ENDIAN TRUE

/* This port needs no CPU-specific per-CPU control state. */
#define CPU_PER_CPU_CONTROL_SIZE 0
129 
/* Byte offsets of the Context_Control members, for use from assembly
 * code (32-bit target layout: each member occupies 4 bytes).
 */
#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
#define I386_CONTEXT_CONTROL_ESP_OFFSET    4
#define I386_CONTEXT_CONTROL_EBP_OFFSET    8
#define I386_CONTEXT_CONTROL_EBX_OFFSET    12
#define I386_CONTEXT_CONTROL_ESI_OFFSET    16
#define I386_CONTEXT_CONTROL_EDI_OFFSET    20

#ifdef RTEMS_SMP
  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 24
#endif
140 
141 /* structures */
142 
143 #ifndef ASM
144 
145 /*
146  * Basic integer context for the i386 family.
147  */
148 
/*
 *  Basic integer context for the i386 family.
 *
 *  NOTE(review): the closing "} Context_Control;" line was lost in
 *  extraction and has been restored; confirm against the original source.
 */
typedef struct {
  uint32_t eflags;   /* extended flags register */
  void    *esp;      /* extended stack pointer register */
  void    *ebp;      /* extended base pointer register */
  uint32_t ebx;      /* extended bx register */
  uint32_t esi;      /* extended source index register */
  uint32_t edi;      /* extended destination index register */
#ifdef RTEMS_SMP
  /* Set while a processor is still executing on this context (SMP only). */
  volatile bool is_executing;
#endif
} Context_Control;

/* Return the stack pointer saved in a context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp

#ifdef RTEMS_SMP
  /* Read the is_executing flag of a context. */
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  /* Write the is_executing flag of a context. */
  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif
180 
/*
 *  FP context save area for the i387 numeric coprocessors.
 *
 *  NOTE(review): the closing "} Context_Control_fp;" lines of both
 *  variants were lost in extraction and have been restored.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   - after ISR execution a _Thread_Dispatch() may happen
 *     and it is therefore necessary to save the FPU/SSE
 *     registers to be restored when control is returned
 *     to the interrupted task.
 *   - gcc may implicitly use FPU/SSE instructions in an ISR.
 *
 * Even though there is no explicit mentioning of the FPU
 * control word in the SYSV ABI (i386) being non-volatile
 * we maintain MXCSR and the FPU control-word for each task.
 */
typedef struct {
  uint32_t mxcsr;  /* SSE control/status register */
  uint16_t fpucw;  /* FPU control word */
} Context_Control_fp;

#else

typedef struct {
  uint8_t fp_save_area[108]; /* context size area for I80387 */
                             /* 28 bytes for environment      */
} Context_Control_fp;

#endif
215 
216 
/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 *  idtIndex is either the interrupt number or the trap/exception number.
 *  faultCode is the code pushed by the processor on some exceptions.
 *
 *  Since the first registers are directly pushed by the CPU they
 *  may not respect 16-byte stack alignment, which is, however,
 *  mandatory for the SSE register area.
 *  Therefore, these registers are stored at an aligned address
 *  and a pointer is stored in the CPU_Exception_frame.
 *  If the executive was compiled without SSE support then
 *  this pointer is NULL.
 *
 *  NOTE(review): the closing "} CPU_Exception_frame;" line was lost in
 *  extraction and has been restored.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt; /* NULL if built without SSE */
  uint32_t edi;
  uint32_t esi;
  uint32_t ebp;
  uint32_t esp0;
  uint32_t ebx;
  uint32_t edx;
  uint32_t ecx;
  uint32_t eax;
  uint32_t idtIndex;   /* interrupt number or trap/exception number */
  uint32_t faultCode;  /* error code pushed by the CPU on some exceptions */
  uint32_t eip;
  uint32_t cs;
  uint32_t eflags;
} CPU_Exception_frame;
252 #ifdef __SSE__
/* 512-byte, 16-byte-aligned FPU/SSE save area (fxsave-style layout —
 * sizes of all fields sum to 512).
 * NOTE(review): the "} Context_Control_sse" line was lost in extraction
 * and has been restored.
 */
typedef struct Context_Control_sse {
  uint16_t fcw;         /* FPU control word */
  uint16_t fsw;         /* FPU status word */
  uint8_t  ftw;         /* FPU tag word */
  uint8_t  res_1;
  uint16_t fop;         /* last FPU opcode */
  uint32_t fpu_ip;      /* FPU instruction pointer */
  uint16_t cs;
  uint16_t res_2;
  uint32_t fpu_dp;      /* FPU data pointer */
  uint16_t ds;
  uint16_t res_3;
  uint32_t mxcsr;
  uint32_t mxcsr_mask;
  struct {
    uint8_t fpreg[10];  /* one 80-bit x87/MMX register */
    uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t  xmmregs[8][16];
  uint8_t  res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
276 #endif
277 
278 typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
279 extern cpuExcHandlerType _currentExcHandler;
280 extern void rtems_exception_init_mngt(void);
281 
282 /*
283  * This port does not pass any frame info to the
284  * interrupt handler.
285  */
286 
287 typedef void CPU_Interrupt_frame;
288 
/* Symbolic names for the Intel-defined exception vectors. */
typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50 /* to enter manually RDBG */

} Intel_symbolic_exception_name;
312 
313 
314 /*
315  * context size area for floating point
316  *
317  * NOTE: This is out of place on the i386 to avoid a forward reference.
318  */
319 
320 #define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
321 
322 /* variables */
323 
325 
326 #endif /* ASM */
327 
/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */
#define CPU_MODES_INTERRUPT_LEVEL 0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK  0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */
#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */
#define CPU_STACK_MINIMUM_SIZE 4096

#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte
 *  boundaries.
 */
#define CPU_ALIGNMENT           4
#define CPU_HEAP_ALIGNMENT      CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */
#define CPU_STACK_ALIGNMENT 16
377 
378 /* macros */
379 
380 #ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

/* Any non-zero level disables maskable interrupts; zero enables them. */
#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }

uint32_t _CPU_ISR_Get_level( void );

/* Make sure interrupt stack has space for ISR
 * 'vector' arg at the top and that it is aligned
 * properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi ) \
  do { \
    _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
  } while (0)
415 
416 #endif /* ASM */
417 
418 /* end of ISR handler macros */
419 
420 /*
421  * Context handler macros
422  *
423  * These macros perform the following functions:
424  * + initialize a context area
425  * + restart the current thread
426  * + calculate the initial pointer into a FP context area
427  * + initialize an FP context area
428  */
429 
/* Initial EFLAGS images for new task contexts (IF set or clear). */
#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
432 
433 #ifndef ASM
434 
/*
 * Stack alignment note:
 *
 * We want the stack to look to the '_entry_point' routine
 * like an ordinary stack frame as if '_entry_point' was
 * called from C-code.
 * Note that '_entry_point' is jumped-to by the 'ret'
 * instruction returning from _CPU_Context_switch() or
 * _CPU_Context_restore() thus popping the _entry_point
 * from the stack.
 * However, _entry_point expects a frame to look like this:
 *
 *      args        [_Thread_Handler expects no args, however]
 *      ------      (alignment boundary)
 * SP-> return_addr return here when _entry_point returns (which never happens)
 *
 *
 * Hence we must initialize the stack as follows
 *
 *         [arg1          ]:  n/a
 *         [arg0 (aligned)]:  n/a
 *         [ret. addr     ]:  NULL
 * SP->    [jump-target   ]:  _entry_point
 *
 * When Context_switch returns it pops the _entry_point from
 * the stack which then finds a standard layout.
 */

#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                 _isr, _entry_point, _is_fp, _tls_area ) \
  do { \
    uint32_t _stack; \
    \
    (void) _is_fp; /* avoid warning for being unused */ \
    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
    \
    _stack  = ((uint32_t)(_stack_base)) + (_size); \
    _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
    *((proc_ptr *)(_stack)) = (_entry_point); \
    (_the_context)->ebp     = (void *) 0; \
    (_the_context)->esp     = (void *) _stack; \
  } while (0)

/* Restart the currently executing thread by restoring its own context. */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );
484 
#if defined(RTEMS_SMP)
  /* Perform port-specific SMP initialization; returns processor count. */
  uint32_t _CPU_SMP_Initialize( void );

  /* Start the processor with the given index; returns success. */
  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  /* Complete SMP initialization once all processors are started. */
  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  /* Hook invoked before multitasking starts on a processor. */
  void _CPU_SMP_Prepare_start_multitasking( void );

  /* Return the index of the processor executing this code. */
  uint32_t _CPU_SMP_Get_current_processor( void );

  /* Raise an inter-processor interrupt on the target processor. */
  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /* Compiler barrier only; no special hardware event is needed here. */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  /* Compiler barrier only; no special hardware event is needed here. */
  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif
508 
/* Compute the starting address of an FP context area. */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Initialize an FP context area by copying the null FP context into it. */
#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

#define _CPU_Fatal_halt( _source, _error ) \
  { \
    uint32_t _error_lvalue = ( _error ); \
    __asm__ volatile ( "cli ; \
                        movl %0,%%eax ; \
                        hlt" \
                        : "=r" ((_error_lvalue)) : "0" ((_error_lvalue)) \
    ); \
  }
535 
536 #endif /* ASM */
537 
538 /* end of Fatal Error manager macros */
539 
/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *    + scan for the highest numbered (MSB) set in a 16 bit bitfield
 *
 *  NOTE(review): bsfw scans from the least significant bit; the wording
 *  above reflects the generic score convention — confirm against the
 *  priority bitmap usage.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
#define CPU_USE_GENERIC_BITFIELD_DATA FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t __value_in_register = (_value); \
    \
    _output = 0; \
    \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" (__value_in_register), "=r" (_output) \
                    : "0"  (__value_in_register), "1"  (_output) \
    ); \
  }
561 
562 /* end of Bitfield handler macros */
563 
/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

/* i386 needs no translation; the bit number is the index. */
#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
579 
580 /* functions */
581 
582 #ifndef ASM
583 /*
584  * _CPU_Initialize
585  *
586  * This routine performs CPU dependent initialization.
587  */
588 
589 void _CPU_Initialize(void);
590 
591 /*
592  * _CPU_ISR_install_raw_handler
593  *
594  * This routine installs a "raw" interrupt handler directly into the
595  * processor's vector table.
596  */
597 
599  uint32_t vector,
600  proc_ptr new_handler,
601  proc_ptr *old_handler
602 );
603 
604 /*
605  * _CPU_ISR_install_vector
606  *
607  * This routine installs an interrupt vector.
608  */
609 
611  uint32_t vector,
612  proc_ptr new_handler,
613  proc_ptr *old_handler
614 );
615 
616 /*
617  * _CPU_Thread_Idle_body
618  *
619  * Use the halt instruction of low power mode of a particular i386 model.
620  */
621 
622 #if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
623 
624 void *_CPU_Thread_Idle_body( uintptr_t ignored );
625 
626 #endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
627 
628 /*
629  * _CPU_Context_switch
630  *
631  * This routine switches from the run context to the heir context.
632  */
633 
635  Context_Control *run,
636  Context_Control *heir
637 );
638 
639 /*
640  * _CPU_Context_restore
641  *
642  * This routine is generally used only to restart self in an
643  * efficient manner and avoid stack conflicts.
644  */
645 
647  Context_Control *new_context
649 
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  NOTE(review): the non-SSE declarator lines below were lost in
 *  extraction and were restored from the cross-reference index.
 */

#ifdef __SSE__
/* With SSE, only the FPU control word and MXCSR are maintained per task
 * (see the Context_Control_fp comment above).
 */
#define _CPU_Context_save_fp(fp_context_pp) \
  do { \
    __asm__ __volatile__( \
      "fstcw %0" \
      :"=m"((*(fp_context_pp))->fpucw) \
    ); \
    __asm__ __volatile__( \
      "stmxcsr %0" \
      :"=m"((*(fp_context_pp))->mxcsr) \
    ); \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do { \
    __asm__ __volatile__( \
      "fldcw %0" \
      ::"m"((*(fp_context_pp))->fpucw) \
      :"fpcr" \
    ); \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr); \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
/* Reset the FPU and restore the task's FP context (if any) when a
 * thread begins execution.
 */
#define _CPU_Context_Initialization_at_thread_begin() \
  do { \
    __asm__ __volatile__( \
      "finit" \
      : \
      : \
      :"st","st(1)","st(2)","st(3)", \
       "st(4)","st(5)","st(6)","st(7)", \
       "fpsr","fpcr" \
    ); \
    if ( _Thread_Executing->fp_context ) { \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    } \
  } while (0)
#endif
711 
/* Intended to clobber all volatile registers with values derived from
 * the pattern parameter; not yet implemented for this port.
 */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  (void) pattern; /* TODO: silence unused-parameter warning until implemented */
}

/* Intended to initialize and validate the CPU context with values
 * derived from the pattern parameter; never returns.  Not yet
 * implemented for this port — it currently just spins.
 */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  (void) pattern; /* TODO */
  while (1) {
    /* TODO */
  }
}
723 
/* Unsigned integer type for CPU counter values. */
typedef uint32_t CPU_Counter_ticks;

/* Returns the current CPU counter value. */
CPU_Counter_ticks _CPU_Counter_read( void );

/* Returns the difference between the second and first CPU counter
 * value; unsigned modulo arithmetic handles counter wrap-around.
 */
static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
737 
738 #endif /* ASM */
739 
740 #ifdef __cplusplus
741 }
742 #endif
743 
744 #endif
void _CPU_ISR_install_vector(uint32_t vector, proc_ptr new_handler, proc_ptr *old_handler)
This routine installs an interrupt vector.
Definition: cpu.c:69
void _CPU_Context_validate(uintptr_t pattern)
Initializes and validates the CPU context with values derived from the pattern parameter.
Definition: cpu.h:1109
uint32_t _CPU_ISR_Get_level(void)
Return the current interrupt disable level for this task in the format used by the interrupt level po...
Definition: cpu.c:39
void _CPU_Context_restore(Context_Control *new_context)
This routine is generally used only to restart self in an efficient manner.
Definition: cpu_asm.c:112
void _CPU_Context_switch(Context_Control *run, Context_Control *heir)
CPU switch context.
Definition: cpu_asm.c:92
void _CPU_Context_volatile_clobber(uintptr_t pattern)
Clobbers all volatile registers with values derived from the pattern parameter.
Definition: cpu.h:1104
This defines the minimal set of integer and processor state registers that must be saved during a vol...
Definition: cpu.h:248
void _CPU_Initialize(void)
CPU initialization.
Definition: cpu.c:26
SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context
This variable is optional.
Definition: cpu.h:494
uint32_t CPU_Counter_ticks
Unsigned integer type for CPU counter values.
Definition: cpu.h:1461
void _CPU_ISR_install_raw_handler(uint32_t vector, proc_ptr new_handler, proc_ptr *old_handler)
This routine installs a "raw" interrupt handler directly into the processor's vector table...
Definition: cpu.c:57
register struct Per_CPU_Control *_SPARC_Per_CPU_current __asm__("g6")
The pointer to the current per-CPU control is available via register g6.
void _CPU_Context_restore_fp(Context_Control_fp **fp_context_ptr)
This routine restores the floating point context passed to it.
Definition: cpu.c:176
Intel I386 CPU Dependent Source.
CPU_Counter_ticks _CPU_Counter_read(void)
Returns the current CPU counter value.
Definition: cpu.c:96
Intel I386 Interrupt Macros.
CPU_Counter_ticks _CPU_Counter_difference(CPU_Counter_ticks second, CPU_Counter_ticks first)
Returns the difference between the second and first CPU counter value.
Definition: cpu.h:1160
Intel CPU Constants and Definitions.
This defines the complete set of floating point registers that must be saved during any context switc...
Definition: cpu.h:294
void _CPU_Context_save_fp(Context_Control_fp **fp_context_ptr)
This routine saves the floating point context passed to it.
Definition: cpu.c:167
Definition: sse_test.c:126
#define RTEMS_COMPILER_NO_RETURN_ATTRIBUTE
The following macro is a compiler specific way to indicate that the method will NOT return to the cal...
Definition: basedefs.h:162
void _CPU_Exception_frame_print(const CPU_Exception_frame *frame)
Prints the exception frame via printk().
Definition: arm-exception-frame-print.c:46
void * _CPU_Thread_Idle_body(uintptr_t ignored)
This routine is the CPU dependent IDLE thread body.
Definition: cpu.c:125
The set of registers that specifies the complete processor state.
Definition: cpu.h:671
#define SCORE_EXTERN
The following ensures that all data is declared in the space of the initialization routine for either...
Definition: basedefs.h:81
void * proc_ptr
XXX: Eventually proc_ptr needs to disappear!!!
Definition: basedefs.h:329