RTEMS CPU Kit with SuperCore
score/cpu/sh/rtems/score/cpu.h
1 
5 /*
6  * This include file contains information pertaining to the Hitachi SH
7  * processor.
8  *
9  * Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and
10  * Bernd Becker (becker@faw.uni-ulm.de)
11  *
12  * COPYRIGHT (c) 1997-1998, FAW Ulm, Germany
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17  *
18  *
19  * COPYRIGHT (c) 1998-2006.
20  * On-Line Applications Research Corporation (OAR).
21  *
22  * The license and distribution terms for this file may be
23  * found in the file LICENSE in this distribution or at
24  * http://www.rtems.org/license/LICENSE.
25  */
26 
27 #ifndef _RTEMS_SCORE_CPU_H
28 #define _RTEMS_SCORE_CPU_H
29 
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33 
34 #include <rtems/score/types.h>
35 #include <rtems/score/sh.h>
36 
37 /* conditional compilation parameters */
38 
39 /*
40  * Should the calls to _Thread_Enable_dispatch be inlined?
41  *
42  * If TRUE, then they are inlined.
43  * If FALSE, then a subroutine call is made.
44  *
45  * Basically this is an example of the classic trade-off of size
46  * versus speed. Inlining the call (TRUE) typically increases the
47  * size of RTEMS while speeding up the enabling of dispatching.
48  * [NOTE: In general, the _Thread_Dispatch_disable_level will
49  * only be 0 or 1 unless you are in an interrupt handler and that
50  * interrupt handler invokes the executive.] When not inlined,
51  * something calls _Thread_Enable_dispatch, which in turn calls
52  * _Thread_Dispatch. If the enable dispatch is inlined, then
53  * one subroutine call is avoided entirely.
54  */
55 
56 #define CPU_INLINE_ENABLE_DISPATCH FALSE
57 
58 /*
59  * Does the CPU follow the simple vectored interrupt model?
60  *
61  * If TRUE, then RTEMS allocates and internally manages the vector table.
62  * If FALSE, then the BSP is assumed to allocate and manage the vector
63  * table.
64  *
65  * SH Specific Information:
66  *
67  * XXX document implementation including references if appropriate
68  */
69 #define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE
70 
71 /*
72  * Does RTEMS manage a dedicated interrupt stack in software?
73  *
74  * If TRUE, then a stack is allocated in _ISR_Handler_initialization.
75  * If FALSE, nothing is done.
76  *
77  * If the CPU supports a dedicated interrupt stack in hardware,
78  * then it is generally the responsibility of the BSP to allocate it
79  * and set it up.
80  *
81  * If the CPU does not support a dedicated interrupt stack, then
82  * the porter has two options: (1) execute interrupts on the
83  * stack of the interrupted task, and (2) have RTEMS manage a dedicated
84  * interrupt stack.
85  *
86  * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
87  *
88  * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
89  * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
90  * possible that both are FALSE for a particular CPU, although it
91  * is unclear what that would imply about the interrupt processing
92  * procedure on that CPU.
93  */
94 
95 #define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
96 #define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
97 
98 /*
99  * We define the interrupt stack in the linker script
100  */
101 #define CPU_ALLOCATE_INTERRUPT_STACK FALSE
102 
103 /*
104  * Does RTEMS invoke the user's ISR with the vector number and
105  * a pointer to the saved interrupt frame (1) or just the vector
106  * number (0)?
107  */
108 
109 #define CPU_ISR_PASSES_FRAME_POINTER 0
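/*
 * Illustrative sketch only (the handler names are hypothetical): with the
 * setting above, a user ISR for this port receives just the vector number.
 * The frame pointer form used by ports which set this to 1 is shown for
 * contrast.
 */
#if 0
void my_vector_only_handler( uint32_t vector );                        /* value 0 */
void my_frame_handler( uint32_t vector, CPU_Interrupt_frame *frame );  /* value 1 */
#endif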
110 
111 /*
112  * Does the CPU have hardware floating point?
113  *
114  * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
115  * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
116  *
117  * We currently support sh1 only, which has no FPU; other SH models have an FPU.
118  *
119  * The macro name "SH_HAS_FPU" should be made CPU specific.
120  * It indicates whether or not this CPU model has FP support. For
121  * example, it would be possible to have an i386_nofp CPU model
122  * which set this to false to indicate that you have an i386 without
123  * an i387 and wish to leave floating point support out of RTEMS.
124  */
125 
126 #if SH_HAS_FPU
127 #define CPU_HARDWARE_FP TRUE
128 #define CPU_SOFTWARE_FP FALSE
129 #else
130 #define CPU_SOFTWARE_FP FALSE
131 #define CPU_HARDWARE_FP FALSE
132 #endif
133 
134 /*
135  * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
136  *
137  * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
138  * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
139  *
140  * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
141  */
142 
143 #if SH_HAS_FPU
144 #define CPU_ALL_TASKS_ARE_FP TRUE
145 #else
146 #define CPU_ALL_TASKS_ARE_FP FALSE
147 #endif
148 
149 /*
150  * Should the IDLE task have a floating point context?
151  *
152  * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
153  * and it has a floating point context which is switched in and out.
154  * If FALSE, then the IDLE task does not have a floating point context.
155  *
156  * Setting this to TRUE negatively impacts the time required to preempt
157  * the IDLE task from an interrupt because the floating point context
158  * must be saved as part of the preemption.
159  */
160 
161 #if SH_HAS_FPU
162 #define CPU_IDLE_TASK_IS_FP TRUE
163 #else
164 #define CPU_IDLE_TASK_IS_FP FALSE
165 #endif
166 
167 /*
168  * Should the saving of the floating point registers be deferred
169  * until a context switch is made to another different floating point
170  * task?
171  *
172  * If TRUE, then the floating point context will not be stored until
173  * necessary. It will remain in the floating point registers and not
174  * disturbed until another floating point task is switched to.
175  *
176  * If FALSE, then the floating point context is saved when a floating
177  * point task is switched out and restored when the next floating point
178  * task is restored. The state of the floating point registers between
179  * those two operations is not specified.
180  *
181  * If the floating point context does NOT have to be saved as part of
182  * interrupt dispatching, then it should be safe to set this to TRUE.
183  *
184  * Setting this flag to TRUE results in using a different algorithm
185  * for deciding when to save and restore the floating point context.
186  * The deferred FP switch algorithm minimizes the number of times
187  * the FP context is saved and restored. The FP context is not saved
188  * until a context switch is made to another, different FP task.
189  * Thus in a system with only one FP task, the FP context will never
190  * be saved or restored.
191  */
192 
193 #if SH_HAS_FPU
194 #define CPU_USE_DEFERRED_FP_SWITCH FALSE
195 #else
196 #define CPU_USE_DEFERRED_FP_SWITCH TRUE
197 #endif
198 
199 /*
200  * Does this port provide a CPU dependent IDLE task implementation?
201  *
202  * If TRUE, then the routine _CPU_Thread_Idle_body
203  * must be provided and is the default IDLE thread body instead of
204  * the generic _Thread_Idle_body.
205  *
206  * If FALSE, then use the generic IDLE thread body if the BSP does
207  * not provide one.
208  *
209  * This is intended to allow for supporting processors which have
210  * a low power or idle mode. When the IDLE thread is executed, then
211  * the CPU can be powered down.
212  *
213  * The order of precedence for selecting the IDLE thread body is:
214  *
215  * 1. BSP provided
216  * 2. CPU dependent (if provided)
217  * 3. generic (if no BSP and no CPU dependent)
218  */
219 
220 #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
221 
222 /*
223  * Does the stack grow up (toward higher addresses) or down
224  * (toward lower addresses)?
225  *
226  * If TRUE, then the stack grows upward.
227  * If FALSE, then the stack grows toward smaller addresses.
228  */
229 
230 #define CPU_STACK_GROWS_UP FALSE
231 
232 /*
233  * The following is the variable attribute used to force alignment
234  * of critical RTEMS structures. On some processors it may make
235  * sense to have these aligned on tighter boundaries than
236  * the minimum requirements of the compiler in order to have as
237  * much of the critical data area as possible in a cache line.
238  *
239  * The placement of this macro in the declaration of the variables
240  * is based on the syntactic requirements of the GNU C
241  * "__attribute__" extension. For example with GNU C, use
242  * the following to force a structure to a 32 byte boundary.
243  *
244  * __attribute__ ((aligned (32)))
245  *
246  * NOTE: Currently only the Priority Bit Map table uses this feature.
247  * To benefit from using this, the data must be heavily
248  * used so it will stay in the cache and used frequently enough
249  * in the executive to justify turning this on.
250  */
251 
252 #define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned(16)))
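/*
 * Illustrative use only (the variable is hypothetical): the macro is placed
 * after the declarator, as the GNU C attribute syntax described above
 * requires.
 */
#if 0
volatile uint32_t _Example_hot_table[ 4 ] CPU_STRUCTURE_ALIGNMENT;
#endif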
253 
254 #define CPU_TIMESTAMP_USE_INT64_INLINE TRUE
255 
256 /*
257  * Define what is required to specify how the network to host conversion
258  * routines are handled.
259  *
260  * NOTE: SH CPUs can be big or little endian; the default is big endian
261  */
262 
263 /* __LITTLE_ENDIAN__ is defined if -ml is given to gcc */
264 #if defined(__LITTLE_ENDIAN__)
265 #define CPU_BIG_ENDIAN FALSE
266 #define CPU_LITTLE_ENDIAN TRUE
267 #else
268 #define CPU_BIG_ENDIAN TRUE
269 #define CPU_LITTLE_ENDIAN FALSE
270 #endif
271 
272 /*
273  * The following defines the number of bits actually used in the
274  * interrupt field of the task mode. How those bits map to the
275  * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
276  */
277 
278 #define CPU_MODES_INTERRUPT_MASK 0x0000000f
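/*
 * Illustrative only (the values are hypothetical): the mask above limits
 * the interrupt level portion of a task mode to 0-15; higher bits are
 * simply discarded.
 */
#if 0
uint32_t requested_mode  = 0x23;
uint32_t effective_level = requested_mode & CPU_MODES_INTERRUPT_MASK;  /* 0x3 */
#endif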
279 
280 #define CPU_PER_CPU_CONTROL_SIZE 0
281 
282 /*
283  * Processor defined structures required for cpukit/score.
284  */
285 
286 /* may need to put some structures here. */
287 
288 /*
289  * Contexts
290  *
291  * Generally there are 2 types of context to save.
292  * 1. Interrupt registers to save
293  * 2. Task level registers to save
294  *
295  * This means we have the following 3 context items:
296  * 1. task level context stuff:: Context_Control
297  * 2. floating point task stuff:: Context_Control_fp
298  * 3. special interrupt level context :: Context_Control_interrupt
299  *
300  * On some processors, it is cost-effective to save only the callee
301  * preserved registers during a task context switch. This means
302  * that the ISR code needs to save those registers which do not
303  * persist across function calls. It is not mandatory to make this
304  * distinction between the caller/callee saved registers for the
305  * purpose of minimizing context saved during task switch and on interrupts.
306  * If the cost of saving extra registers is minimal, simplicity is the
307  * choice. Save the same context on interrupt entry as for tasks in
308  * this case.
309  *
310  * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
311  * care should be used in designing the context area.
312  *
313  * On some CPUs with hardware floating point support, the Context_Control_fp
314  * structure will not be used or it may simply consist of an array of a
315  * fixed number of bytes. This is done when the floating point context
316  * is dumped by a "FP save context" type instruction and the format
317  * is not really defined by the CPU. In this case, there is no need
318  * to figure out the exact format -- only the size. Of course, although
319  * this is enough information for RTEMS, it is probably not enough for
320  * a debugger such as gdb. But that is another problem.
321  */
322 
323 typedef struct {
324  uint32_t *r15; /* stack pointer */
325 
326  uint32_t macl;
327  uint32_t mach;
328  uint32_t *pr;
329 
330  uint32_t *r14; /* frame pointer/call saved */
331 
332  uint32_t r13; /* call saved */
333  uint32_t r12; /* call saved */
334  uint32_t r11; /* call saved */
335  uint32_t r10; /* call saved */
336  uint32_t r9; /* call saved */
337  uint32_t r8; /* call saved */
338 
339  uint32_t *r7; /* arg in */
340  uint32_t *r6; /* arg in */
341 
342 #if 0
343  uint32_t *r5; /* arg in */
344  uint32_t *r4; /* arg in */
345 #endif
346 
347  uint32_t *r3; /* scratch */
348  uint32_t *r2; /* scratch */
349  uint32_t *r1; /* scratch */
350 
351  uint32_t *r0; /* arg return */
352 
353  uint32_t gbr;
354  uint32_t sr;
355 
356 } Context_Control;
357 
358 #define _CPU_Context_Get_SP( _context ) \
359  (_context)->r15
360 
361 typedef struct {
362 #if SH_HAS_FPU
363 #ifdef SH4_USE_X_REGISTERS
364  union {
365  float f[16];
366  double d[8];
367  } x;
368 #endif
369  union {
370  float f[16];
371  double d[8];
372  } r;
373  float fpul; /* fp communication register */
374  uint32_t fpscr; /* fp control register */
375 #endif /* SH_HAS_FPU */
376 } Context_Control_fp;
377 
378 typedef struct {
379 } CPU_Interrupt_frame;
380 
381 /*
382  * This variable is optional. It is used on CPUs on which it is difficult
383  * to generate an "uninitialized" FP context. It is filled in by
384  * _CPU_Initialize and copied into the task's FP context area during
385  * _CPU_Context_Initialize.
386  */
387 
388 #if SH_HAS_FPU
389 SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context;
390 #endif
391 
392 /*
393  * Nothing prevents the porter from declaring more CPU specific variables.
394  */
395 
396 /* XXX: if needed, put more variables here */
397 SCORE_EXTERN void CPU_delay( uint32_t microseconds );
398 
399 /*
400  * The size of the floating point context area. On some CPUs this
401  * will not be a "sizeof" because the format of the floating point
402  * area is not defined -- only the size is. This is usually on
403  * CPUs with a "floating point save context" instruction.
404  */
405 
406 #define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
407 
408 /*
409  * Amount of extra stack (above minimum stack size) required by
410  * MPCI receive server thread. Remember that in a multiprocessor
411  * system this thread must exist and be able to process all directives.
412  */
413 
414 #define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
415 
416 /*
417  * This defines the number of entries in the ISR_Vector_table managed
418  * by RTEMS.
419  */
420 
421 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 256
422 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
423 
424 /*
425  * This is defined if the port has a special way to report the ISR nesting
426  * level. Most ports maintain the variable _ISR_Nest_level.
427  */
428 
429 #define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
430 
431 /*
432  * Should be large enough to run all RTEMS tests. This ensures
433  * that a "reasonably" small application should not have any problems.
434  *
435  * We have been able to run the sptests with this value, but have not
436  * been able to run the tmtest suite.
437  */
438 
439 #define CPU_STACK_MINIMUM_SIZE 4096
440 
441 #define CPU_SIZEOF_POINTER 4
442 
443 /*
444  * CPU's worst alignment requirement for data types on a byte boundary. This
445  * alignment does not take into account the requirements for the stack.
446  */
447 #if defined(__SH4__)
448 /* FIXME: sh3 and SH3E? */
449 #define CPU_ALIGNMENT 8
450 #else
451 #define CPU_ALIGNMENT 4
452 #endif
453 
454 /*
455  * This number corresponds to the byte alignment requirement for the
456  * heap handler. This alignment requirement may be stricter than that
457  * for the data types alignment specified by CPU_ALIGNMENT. It is
458  * common for the heap to follow the same alignment requirement as
459  * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
460  * then this should be set to CPU_ALIGNMENT.
461  *
462  * NOTE: This does not have to be a power of 2. It does have to
463  * be greater than or equal to CPU_ALIGNMENT.
464  */
465 
466 #define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
467 
468 /*
469  * This number corresponds to the byte alignment requirement for memory
470  * buffers allocated by the partition manager. This alignment requirement
471  * may be stricter than that for the data types alignment specified by
472  * CPU_ALIGNMENT. It is common for the partition to follow the same
473  * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
474  * enough for the partition, then this should be set to CPU_ALIGNMENT.
475  *
476  * NOTE: This does not have to be a power of 2. It does have to
477  * be greater than or equal to CPU_ALIGNMENT.
478  */
479 
480 #define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
481 
482 /*
483  * This number corresponds to the byte alignment requirement for the
484  * stack. This alignment requirement may be stricter than that for the
485  * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
486  * is strict enough for the stack, then this should be set to 0.
487  *
488  * NOTE: This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
489  */
490 
491 #define CPU_STACK_ALIGNMENT CPU_ALIGNMENT
492 
493 /*
494  * ISR handler macros
495  */
496 
497 /*
498  * Support routine to initialize the RTEMS vector table after it is allocated.
499  *
500  * SH Specific Information: NONE
501  */
502 
503 #define _CPU_Initialize_vectors()
504 
505 /*
506  * Disable all interrupts for an RTEMS critical section. The previous
507  * level is returned in _level.
508  */
509 
510 #define _CPU_ISR_Disable( _level) \
511  sh_disable_interrupts( _level )
512 
513 /*
514  * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
515  * This indicates the end of an RTEMS critical section. The parameter
516  * _level is not modified.
517  */
518 
519 #define _CPU_ISR_Enable( _level) \
520  sh_enable_interrupts( _level)
521 
522 /*
523  * This temporarily restores interrupts to _level before immediately
524  * disabling them again. This is used to divide long RTEMS critical
525  * sections into two or more parts. The parameter _level is not
526  * modified.
527  */
528 
529 #define _CPU_ISR_Flash( _level) \
530  sh_flash_interrupts( _level)
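/*
 * Illustrative sketch of a typical critical section built from the three
 * macros above; the function and the work inside it are placeholders.
 */
#if 0
static void example_critical_section( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );   /* enter the critical section           */
  /* ... first part of the critical work ... */
  _CPU_ISR_Flash( level );     /* briefly let pending interrupts run   */
  /* ... second part of the critical work ... */
  _CPU_ISR_Enable( level );    /* restore the previous interrupt level */
}
#endif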
531 
532 /*
533  * Map interrupt level in task mode onto the hardware that the CPU
534  * actually provides. Currently, interrupt levels which do not
535  * map onto the CPU in a generic fashion are undefined. Someday,
536  * it would be nice if these were "mapped" by the application
537  * via a callout. For example, m68k has 8 levels 0 - 7, levels
538  * 8 - 255 would be available for bsp/application specific meaning.
539  * This could be used to manage a programmable interrupt controller
540  * via the rtems_task_mode directive.
541  */
542 
543 #define _CPU_ISR_Set_level( _newlevel) \
544  sh_set_interrupt_level(_newlevel)
545 
546 uint32_t _CPU_ISR_Get_level( void );
547 
548 /* end of ISR handler macros */
549 
550 /* Context handler macros */
551 
552 /*
553  * Initialize the context to a state suitable for starting a
554  * task after a context restore operation. Generally, this
555  * involves:
556  *
557  * - setting a starting address
558  * - preparing the stack
559  * - preparing the stack and frame pointers
560  * - setting the proper interrupt level in the context
561  * - initializing the floating point context
562  *
563  * This routine generally does not set any unnecessary register
564  * in the context. The state of the "general data" registers is
565  * undefined at task start time.
566  *
567  * NOTE: The is_fp parameter is TRUE if the thread is to be a floating
568  * point thread. This is typically only used on CPUs where the
569  * FPU may be easily disabled by software such as on the SPARC
570  * where the PSR contains an enable FPU bit.
571  */
572 
573 /*
574  * FIXME: defined as a function for debugging - should be a macro
575  */
576 SCORE_EXTERN void _CPU_Context_Initialize(
577  Context_Control *_the_context,
578  void *_stack_base,
579  uint32_t _size,
580  uint32_t _isr,
581  void (*_entry_point)(void),
582  int _is_fp,
583  void *_tls_area );
584 
585 /*
586  * This routine is responsible for somehow restarting the currently
587  * executing task. If you are lucky, then all that is necessary
588  * is restoring the context. Otherwise, there will need to be
589  * a special assembly routine which does something special in this
590  * case. Context_Restore should work most of the time. It will
591  * not work if restarting self conflicts with the stack frame
592  * assumptions of restoring a context.
593  */
594 
595 #define _CPU_Context_Restart_self( _the_context ) \
596  _CPU_Context_restore( (_the_context) );
597 
598 /*
599  * The purpose of this macro is to allow the initial pointer into
600  * a floating point context area (used to save the floating point
601  * context) to be at an arbitrary place in the floating point
602  * context area.
603  *
604  * This is necessary because some FP units are designed to have
605  * their context saved as a stack which grows into lower addresses.
606  * Other FP units can be saved by simply moving registers into offsets
607  * from the base of the context area. Finally some FP units provide
608  * a "dump context" instruction which could fill in from high to low
609  * or low to high based on the whim of the CPU designers.
610  */
611 
612 #define _CPU_Context_Fp_start( _base, _offset ) \
613  ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
614 
615 /*
616  * This routine initializes the FP context area passed to it.
617  * There are a few standard ways in which to initialize the
618  * floating point context. The code included for this macro assumes
619  * that this is a CPU in which an "initial" FP context was saved into
620  * _CPU_Null_fp_context and it simply copies it to the destination
621  * context passed to it.
622  *
623  * Other models include (1) not doing anything, and (2) putting
624  * a "null FP status word" in the correct place in the FP context.
625  * SH1, SH2 and SH3 have no FPU, but the SH3e and SH4 do.
626  */
627 
628 #if SH_HAS_FPU
629 #define _CPU_Context_Initialize_fp( _destination ) \
630  do { \
631  *(*(_destination)) = _CPU_Null_fp_context;\
632  } while(0)
633 #else
634 #define _CPU_Context_Initialize_fp( _destination ) \
635  { }
636 #endif
637 
638 /* end of Context handler macros */
639 
640 /* Fatal Error manager macros */
641 
642 /*
643  * FIXME: Trap32 ???
644  *
645  * This routine copies _error into a known place -- typically a stack
646  * location or a register, optionally disables interrupts, and
647  * invokes a Trap32 Instruction which returns to the breakpoint
648  * routine of cmon.
649  */
650 
651 #ifdef BSP_FATAL_HALT
652  /* we manage the fatal error in the board support package */
653  void bsp_fatal_halt( uint32_t _error);
654 #define _CPU_Fatal_halt( _source, _error ) bsp_fatal_halt( _error)
655 #else
656 #define _CPU_Fatal_halt( _source, _error)\
657 { \
658  __asm__ volatile("mov.l %0,r0"::"m" (_error)); \
659  __asm__ volatile("mov #1, r4"); \
660  __asm__ volatile("trapa #34"); \
661 }
662 #endif
663 
664 /* end of Fatal Error manager macros */
665 
666 /* Bitfield handler macros */
667 
668 /*
669  * This routine sets _output to the bit number of the first bit
670  * set in _value. _value is of CPU dependent type Priority_bit_map_Word.
671  * This type may be either 16 or 32 bits wide although only the 16
672  * least significant bits will be used.
673  *
674  * There are a number of variations among "find first bit" type
675  * instructions:
676  *
677  * (1) What happens when run on a value of zero?
678  * (2) Bits may be numbered from MSB to LSB or vice-versa.
679  * (3) The numbering may be zero or one based.
680  * (4) The "find first bit" instruction may search from MSB or LSB.
681  *
682  * RTEMS guarantees that (1) will never happen so it is not a concern.
683  * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
684  * _CPU_Priority_bits_index(). These three form a set of routines
685  * which must logically operate together. Bits in the _value are
686  * set and cleared based on masks built by _CPU_Priority_mask().
687  * The basic major and minor values calculated by _Priority_Major()
688  * and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
689  * to properly range between the values returned by the "find first bit"
690  * instruction. This makes it possible for _Priority_Get_highest() to
691  * calculate the major and directly index into the minor table.
692  * This mapping is necessary to ensure that 0 (a high priority major/minor)
693  * is the first bit found.
694  *
695  * This entire "find first bit" and mapping process depends heavily
696  * on the manner in which a priority is broken into a major and minor
697  * components with the major being the 4 MSB of a priority and minor
698  * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
699  * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
700  * to the lowest priority.
701  *
702  * If your CPU does not have a "find first bit" instruction, then
703  * there are ways to make do without it. Here are a handful of ways
704  * to implement this in software:
705  *
706  * - a series of 16 bit test instructions
707  * - a "binary search using if's"
708  * - _number = 0
709  *   if _value > 0x00ff
710  *     _value >>= 8
711  *     _number = 8;
712  *
713  *   if _value > 0x000f
714  *     _value >>= 4
715  *     _number += 4
716  *
717  *   _number += bit_set_table[ _value ]
718  *
719  * where bit_set_table[ 16 ] has values which indicate the first
720  * bit set
721  */
722 
723 #define CPU_USE_GENERIC_BITFIELD_CODE TRUE
724 #define CPU_USE_GENERIC_BITFIELD_DATA TRUE
725 
726 #if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
727 
728 extern uint8_t _bit_set_table[];
729 
730 #define _CPU_Bitfield_Find_first_bit( _value, _output ) \
731  { \
732  _output = 0;\
733  if(_value > 0x00ff) \
734  { _value >>= 8; _output = 8; } \
735  if(_value > 0x000f) \
736  { _output += 4; _value >>= 4; } \
737  _output += _bit_set_table[ _value]; }
738 
739 #endif
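/*
 * Illustrative sketch only: a plain C version of the table based
 * "find first bit" scheme sketched in the comment above. The table and
 * function are hypothetical; this port relies on the generic bitfield
 * code instead.
 */
#if 0
static int example_find_first_bit( uint16_t value )
{
  /* example_bit_set_table[ v ] is the highest set bit position in nibble v */
  static const uint8_t example_bit_set_table[ 16 ] =
    { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
  int number = 0;

  if ( value > 0x00ff ) { value >>= 8; number = 8; }
  if ( value > 0x000f ) { value >>= 4; number += 4; }

  /* position, counted from the least significant bit, of the highest set bit */
  return number + example_bit_set_table[ value ];
}
#endif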
740 
741 /* end of Bitfield handler macros */
742 
743 /*
744  * This routine builds the mask which corresponds to the bit fields
745  * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
746  * for that routine.
747  */
748 
749 #if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
750 
751 #define _CPU_Priority_Mask( _bit_number ) \
752  ( 1 << (_bit_number) )
753 
754 #endif
755 
756 /*
757  * This routine translates the bit numbers returned by
758  * _CPU_Bitfield_Find_first_bit() into something suitable for use as
759  * a major or minor component of a priority. See the discussion
760  * for that routine.
761  */
762 
763 #if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
764 
765 #define _CPU_Priority_bits_index( _priority ) \
766  (_priority)
767 
768 #endif
769 
770 /* end of Priority handler macros */
771 
772 /* functions */
773 
774 /*
775  * @brief CPU Initialize
776  *
777  * _CPU_Initialize
778  *
779  * This routine performs CPU dependent initialization.
780  */
781 void _CPU_Initialize(void);
782 
783 /*
784  * _CPU_ISR_install_raw_handler
785  *
786  * This routine installs a "raw" interrupt handler directly into the
787  * processor's vector table.
788  */
789 
790 void _CPU_ISR_install_raw_handler(
791  uint32_t vector,
792  proc_ptr new_handler,
793  proc_ptr *old_handler
794 );
795 
796 /*
797  * _CPU_ISR_install_vector
798  *
799  * This routine installs an interrupt vector.
800  */
801 
802 void _CPU_ISR_install_vector(
803  uint32_t vector,
804  proc_ptr new_handler,
805  proc_ptr *old_handler
806 );
807 
808 /*
809  * _CPU_Install_interrupt_stack
810  *
811  * This routine installs the hardware interrupt stack pointer.
812  *
813  * NOTE: It needs only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
814  * is TRUE.
815  */
816 
817 void _CPU_Install_interrupt_stack( void );
818 
819 /*
820  * _CPU_Thread_Idle_body
821  *
822  * This routine is the CPU dependent IDLE thread body.
823  *
824  * NOTE: It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
825  * is TRUE.
826  */
827 
828 void *_CPU_Thread_Idle_body( uintptr_t ignored );
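/*
 * Illustrative sketch only, assuming the SH "sleep" instruction is used to
 * idle the CPU until the next interrupt; the actual body is provided by
 * the port's cpu.c.
 */
#if 0
void *_CPU_Thread_Idle_body( uintptr_t ignored )
{
  for ( ;; )
    __asm__ volatile( "sleep" );   /* wake up on the next interrupt */
}
#endif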
829 
830 /*
831  * _CPU_Context_switch
832  *
833  * This routine switches from the run context to the heir context.
834  */
835 
836 void _CPU_Context_switch(
837  Context_Control *run,
838  Context_Control *heir
839 );
840 
841 /*
842  * _CPU_Context_restore
843  *
844  * This routine is generally used only to restart self in an
845  * efficient manner. It may simply be a label in _CPU_Context_switch.
846  */
847 
848 void _CPU_Context_restore(
849  Context_Control *new_context
850 ) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
851 
852 /*
853  * @brief This routine saves the floating point context passed to it.
854  *
855  * _CPU_Context_save_fp
856  *
857  */
858 void _CPU_Context_save_fp(
859  Context_Control_fp **fp_context_ptr
860 );
861 
862 /*
863  * @brief This routine restores the floating point context passed to it.
864  *
865  * _CPU_Context_restore_fp
866  *
867  */
868 void _CPU_Context_restore_fp(
869  Context_Control_fp **fp_context_ptr
870 );
871 
872 static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
873 {
874  /* TODO */
875 }
876 
877 static inline void _CPU_Context_validate( uintptr_t pattern )
878 {
879  while (1) {
880  /* TODO */
881  }
882 }
883 
884 /* FIXME */
885 typedef CPU_Interrupt_frame CPU_Exception_frame;
886 
887 void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
888 
889 typedef uint32_t CPU_Counter_ticks;
890 
891 CPU_Counter_ticks _CPU_Counter_read( void );
892 
893 static inline CPU_Counter_ticks _CPU_Counter_difference(
894  CPU_Counter_ticks second,
895  CPU_Counter_ticks first
896 )
897 {
898  return second - first;
899 }
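/*
 * Illustrative sketch only (the function is hypothetical): measuring a
 * short interval in CPU counter ticks with the two operations above.
 */
#if 0
static CPU_Counter_ticks example_measure( void )
{
  CPU_Counter_ticks begin = _CPU_Counter_read();

  /* ... code being timed ... */

  return _CPU_Counter_difference( _CPU_Counter_read(), begin );
}
#endif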
900 
901 #ifdef __cplusplus
902 }
903 #endif
904 
905 #endif