RTEMS CPU Kit with SuperCore  4.11.3
cpu.h
/*
 *  COPYRIGHT (c) 1989-2012.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  COPYRIGHT (c) 1995 i-cubed ltd.
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of i-cubed limited not be used in
 *      advertising or publicity pertaining to distribution of the
 *      software without specific, written prior permission.
 *      i-cubed limited makes no representations about the suitability
 *      of this software for any purpose.
 *
 *  Copyright (c) 2001 Andy Dachs <a.dachs@sstl.co.uk>.
 *
 *  Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL).
 *
 *  Copyright (c) 2010-2013 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/powerpc.h>
#include <rtems/powerpc/registers.h>

#ifndef ASM
  #include <string.h> /* for memset() */
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch, which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 */

#define CPU_INLINE_ENABLE_DISPATCH FALSE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is used as the IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 */

#define CPU_STACK_GROWS_UP FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical RTEMS structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
 *  "__attribute__" extension.  For example with GNU C, use
 *  the following to force a structure to a 32 byte boundary.
 *
 *      __attribute__ ((aligned (32)))
 *
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
 *         To benefit from using this, the data must be heavily
 *         used so it will stay in the cache and used frequently enough
 *         in the executive to justify turning this on.
 */

#define CPU_STRUCTURE_ALIGNMENT \
  __attribute__ ((aligned (PPC_STRUCTURE_ALIGNMENT)))

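/*
 * Illustrative sketch (not part of the original header): applying
 * CPU_STRUCTURE_ALIGNMENT to a declaration.  Assuming a hypothetical
 * PPC_STRUCTURE_ALIGNMENT of 32, the declaration
 *
 *   static volatile uint32_t hypothetical_bit_map[ 4 ] CPU_STRUCTURE_ALIGNMENT;
 *
 * expands to
 *
 *   static volatile uint32_t hypothetical_bit_map[ 4 ]
 *     __attribute__ ((aligned (32)));
 *
 * so the table starts on a 32-byte (cache line) boundary.
 */
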
#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#if defined(__BIG_ENDIAN__) || defined(_BIG_ENDIAN)
#define CPU_BIG_ENDIAN TRUE
#define CPU_LITTLE_ENDIAN FALSE
#else
#define CPU_BIG_ENDIAN FALSE
#define CPU_LITTLE_ENDIAN TRUE
#endif

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is a FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "PPC_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which set this to false to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 */

#if ( PPC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP TRUE
#define CPU_SOFTWARE_FP FALSE
#else
#define CPU_HARDWARE_FP FALSE
#define CPU_SOFTWARE_FP FALSE
#endif

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 *
 *  PowerPC Note: It appears that GCC can implicitly generate FPU
 *  and Altivec instructions when you least expect them.  So make
 *  all tasks floating point.
 */

#define CPU_ALL_TASKS_ARE_FP CPU_HARDWARE_FP

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 */

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_PER_CPU_CONTROL_SIZE 0

/*
 *  Processor defined structures required for cpukit/score.
 */

/*
 *  Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it simply consists of an array of a
 *  fixed number of bytes.  This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 */

#ifndef __SPE__
  #define PPC_GPR_TYPE uint32_t
  #define PPC_GPR_SIZE 4
  #define PPC_GPR_LOAD lwz
  #define PPC_GPR_STORE stw
#else
  #define PPC_GPR_TYPE uint64_t
  #define PPC_GPR_SIZE 8
  #define PPC_GPR_LOAD evldd
  #define PPC_GPR_STORE evstdd
#endif
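
/*
 * Illustrative sketch (not part of the original header): these macros let
 * the assembly context-switch code use a single source line for both
 * variants.  A hypothetical register save such as
 *
 *   PPC_GPR_STORE r14, PPC_CONTEXT_OFFSET_GPR14(r3)
 *
 * assembles to "stw" with 4-byte slots without SPE, and to "evstdd" with
 * 8-byte slots with SPE, matching PPC_GPR_SIZE and PPC_GPR_TYPE above.
 */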

#ifndef ASM

/*
 * Non-volatile context according to E500ABIUG, EABI and 32-bit TLS (according
 * to "Power Architecture 32-bit Application Binary Interface Supplement 1.0 -
 * Linux and Embedded")
 */
typedef struct {
  uint32_t gpr1;
  uint32_t msr;
  uint32_t lr;
  uint32_t cr;
  PPC_GPR_TYPE gpr14;
  PPC_GPR_TYPE gpr15;
  PPC_GPR_TYPE gpr16;
  PPC_GPR_TYPE gpr17;
  PPC_GPR_TYPE gpr18;
  PPC_GPR_TYPE gpr19;
  PPC_GPR_TYPE gpr20;
  PPC_GPR_TYPE gpr21;
  PPC_GPR_TYPE gpr22;
  PPC_GPR_TYPE gpr23;
  PPC_GPR_TYPE gpr24;
  PPC_GPR_TYPE gpr25;
  PPC_GPR_TYPE gpr26;
  PPC_GPR_TYPE gpr27;
  PPC_GPR_TYPE gpr28;
  PPC_GPR_TYPE gpr29;
  PPC_GPR_TYPE gpr30;
  PPC_GPR_TYPE gpr31;
  uint32_t gpr2;
  #if defined(PPC_MULTILIB_ALTIVEC)
    uint32_t reserved_for_alignment;
    uint8_t v20[16];
    uint8_t v21[16];
    uint8_t v22[16];
    uint8_t v23[16];
    uint8_t v24[16];
    uint8_t v25[16];
    uint8_t v26[16];
    uint8_t v27[16];
    uint8_t v28[16];
    uint8_t v29[16];
    uint8_t v30[16];
    uint8_t v31[16];
    uint32_t vrsave;
  #elif defined(__ALTIVEC__)
    /*
     * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
     * and padding to ensure cache-alignment.  Unfortunately, we can't verify
     * the cache line size here in the cpukit but altivec support code will
     * produce an error if this is ever different from 32 bytes.
     *
     * Note: it is the BSP/CPU-support's responsibility to save/restore
     *       volatile vregs across interrupts and exceptions.
     */
    uint8_t altivec[16*12 + 32 + PPC_DEFAULT_CACHE_LINE_SIZE];
  #endif
  #if defined(PPC_MULTILIB_FPU)
    double f14;
    double f15;
    double f16;
    double f17;
    double f18;
    double f19;
    double f20;
    double f21;
    double f22;
    double f23;
    double f24;
    double f25;
    double f26;
    double f27;
    double f28;
    double f29;
    double f30;
    double f31;
  #endif
  #if defined(RTEMS_SMP)
    /*
     * This item is at the structure end, so that we can use dcbz for the
     * previous items to optimize the context switch.  We must not set this
     * item to zero via the dcbz.
     */
    volatile uint32_t is_executing;
  #endif
} ppc_context;

typedef struct {
  uint8_t context [
    PPC_DEFAULT_CACHE_LINE_SIZE
      + sizeof(ppc_context)
      + (sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE == 0
        ? 0
          : PPC_DEFAULT_CACHE_LINE_SIZE
            - sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE)
  ];
} Context_Control;

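/*
 * Worked example (illustrative, not part of the original header): assume
 * a hypothetical PPC_DEFAULT_CACHE_LINE_SIZE of 32 and
 * sizeof(ppc_context) == 100.  Then 100 % 32 == 4, so the array holds
 * 32 + 100 + (32 - 4) == 160 bytes: enough room for a 32-byte-aligned
 * ppc_context no matter where the Context_Control itself starts.
 */
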
static inline ppc_context *ppc_get_context( const Context_Control *context )
{
  uintptr_t clsz = PPC_DEFAULT_CACHE_LINE_SIZE;
  uintptr_t mask = clsz - 1;
  uintptr_t addr = (uintptr_t) context;

  return (ppc_context *) ((addr & ~mask) + clsz);
}
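
/*
 * Illustrative arithmetic (not part of the original header): with a
 * hypothetical cache line size of 32 (mask 0x1f), a Context_Control at
 * address 0x1004 yields (0x1004 & ~0x1f) + 32 == 0x1020, the next
 * cache-line boundary, which is where the embedded ppc_context starts.
 */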

#define _CPU_Context_Get_SP( _context ) \
  ppc_get_context(_context)->gpr1

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return ppc_get_context(context)->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    ppc_get_context(context)->is_executing = is_executing;
  }
#endif
#endif /* ASM */

#define PPC_CONTEXT_OFFSET_GPR1 (PPC_DEFAULT_CACHE_LINE_SIZE + 0)
#define PPC_CONTEXT_OFFSET_MSR (PPC_DEFAULT_CACHE_LINE_SIZE + 4)
#define PPC_CONTEXT_OFFSET_LR (PPC_DEFAULT_CACHE_LINE_SIZE + 8)
#define PPC_CONTEXT_OFFSET_CR (PPC_DEFAULT_CACHE_LINE_SIZE + 12)

#define PPC_CONTEXT_GPR_OFFSET( gpr ) \
  (((gpr) - 14) * PPC_GPR_SIZE + PPC_DEFAULT_CACHE_LINE_SIZE + 16)

#define PPC_CONTEXT_OFFSET_GPR14 PPC_CONTEXT_GPR_OFFSET( 14 )
#define PPC_CONTEXT_OFFSET_GPR15 PPC_CONTEXT_GPR_OFFSET( 15 )
#define PPC_CONTEXT_OFFSET_GPR16 PPC_CONTEXT_GPR_OFFSET( 16 )
#define PPC_CONTEXT_OFFSET_GPR17 PPC_CONTEXT_GPR_OFFSET( 17 )
#define PPC_CONTEXT_OFFSET_GPR18 PPC_CONTEXT_GPR_OFFSET( 18 )
#define PPC_CONTEXT_OFFSET_GPR19 PPC_CONTEXT_GPR_OFFSET( 19 )
#define PPC_CONTEXT_OFFSET_GPR20 PPC_CONTEXT_GPR_OFFSET( 20 )
#define PPC_CONTEXT_OFFSET_GPR21 PPC_CONTEXT_GPR_OFFSET( 21 )
#define PPC_CONTEXT_OFFSET_GPR22 PPC_CONTEXT_GPR_OFFSET( 22 )
#define PPC_CONTEXT_OFFSET_GPR23 PPC_CONTEXT_GPR_OFFSET( 23 )
#define PPC_CONTEXT_OFFSET_GPR24 PPC_CONTEXT_GPR_OFFSET( 24 )
#define PPC_CONTEXT_OFFSET_GPR25 PPC_CONTEXT_GPR_OFFSET( 25 )
#define PPC_CONTEXT_OFFSET_GPR26 PPC_CONTEXT_GPR_OFFSET( 26 )
#define PPC_CONTEXT_OFFSET_GPR27 PPC_CONTEXT_GPR_OFFSET( 27 )
#define PPC_CONTEXT_OFFSET_GPR28 PPC_CONTEXT_GPR_OFFSET( 28 )
#define PPC_CONTEXT_OFFSET_GPR29 PPC_CONTEXT_GPR_OFFSET( 29 )
#define PPC_CONTEXT_OFFSET_GPR30 PPC_CONTEXT_GPR_OFFSET( 30 )
#define PPC_CONTEXT_OFFSET_GPR31 PPC_CONTEXT_GPR_OFFSET( 31 )
#define PPC_CONTEXT_OFFSET_GPR2 PPC_CONTEXT_GPR_OFFSET( 32 )

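/*
 * Worked example (illustrative, not part of the original header): with a
 * hypothetical 32-byte cache line and no SPE (PPC_GPR_SIZE == 4),
 * PPC_CONTEXT_OFFSET_GPR14 is (14 - 14) * 4 + 32 + 16 == 48, and
 * PPC_CONTEXT_OFFSET_GPR2 is (32 - 14) * 4 + 32 + 16 == 120, i.e. gpr2
 * occupies the slot directly after gpr31, matching the ppc_context
 * layout above.
 */
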
#ifdef PPC_MULTILIB_ALTIVEC
  #define PPC_CONTEXT_OFFSET_V( v ) \
    ( ( ( v ) - 20 ) * 16 + PPC_DEFAULT_CACHE_LINE_SIZE + 96 )
  #define PPC_CONTEXT_OFFSET_V20 PPC_CONTEXT_OFFSET_V( 20 )
  #define PPC_CONTEXT_OFFSET_V21 PPC_CONTEXT_OFFSET_V( 21 )
  #define PPC_CONTEXT_OFFSET_V22 PPC_CONTEXT_OFFSET_V( 22 )
  #define PPC_CONTEXT_OFFSET_V23 PPC_CONTEXT_OFFSET_V( 23 )
  #define PPC_CONTEXT_OFFSET_V24 PPC_CONTEXT_OFFSET_V( 24 )
  #define PPC_CONTEXT_OFFSET_V25 PPC_CONTEXT_OFFSET_V( 25 )
  #define PPC_CONTEXT_OFFSET_V26 PPC_CONTEXT_OFFSET_V( 26 )
  #define PPC_CONTEXT_OFFSET_V27 PPC_CONTEXT_OFFSET_V( 27 )
  #define PPC_CONTEXT_OFFSET_V28 PPC_CONTEXT_OFFSET_V( 28 )
  #define PPC_CONTEXT_OFFSET_V29 PPC_CONTEXT_OFFSET_V( 29 )
  #define PPC_CONTEXT_OFFSET_V30 PPC_CONTEXT_OFFSET_V( 30 )
  #define PPC_CONTEXT_OFFSET_V31 PPC_CONTEXT_OFFSET_V( 31 )
  #define PPC_CONTEXT_OFFSET_VRSAVE PPC_CONTEXT_OFFSET_V( 32 )
  #define PPC_CONTEXT_OFFSET_F( f ) \
    ( ( ( f ) - 14 ) * 8 + PPC_DEFAULT_CACHE_LINE_SIZE + 296 )
#else
  #define PPC_CONTEXT_OFFSET_F( f ) \
    ( ( ( f ) - 14 ) * 8 + PPC_DEFAULT_CACHE_LINE_SIZE + 96 )
#endif

#ifdef PPC_MULTILIB_FPU
  #define PPC_CONTEXT_OFFSET_F14 PPC_CONTEXT_OFFSET_F( 14 )
  #define PPC_CONTEXT_OFFSET_F15 PPC_CONTEXT_OFFSET_F( 15 )
  #define PPC_CONTEXT_OFFSET_F16 PPC_CONTEXT_OFFSET_F( 16 )
  #define PPC_CONTEXT_OFFSET_F17 PPC_CONTEXT_OFFSET_F( 17 )
  #define PPC_CONTEXT_OFFSET_F18 PPC_CONTEXT_OFFSET_F( 18 )
  #define PPC_CONTEXT_OFFSET_F19 PPC_CONTEXT_OFFSET_F( 19 )
  #define PPC_CONTEXT_OFFSET_F20 PPC_CONTEXT_OFFSET_F( 20 )
  #define PPC_CONTEXT_OFFSET_F21 PPC_CONTEXT_OFFSET_F( 21 )
  #define PPC_CONTEXT_OFFSET_F22 PPC_CONTEXT_OFFSET_F( 22 )
  #define PPC_CONTEXT_OFFSET_F23 PPC_CONTEXT_OFFSET_F( 23 )
  #define PPC_CONTEXT_OFFSET_F24 PPC_CONTEXT_OFFSET_F( 24 )
  #define PPC_CONTEXT_OFFSET_F25 PPC_CONTEXT_OFFSET_F( 25 )
  #define PPC_CONTEXT_OFFSET_F26 PPC_CONTEXT_OFFSET_F( 26 )
  #define PPC_CONTEXT_OFFSET_F27 PPC_CONTEXT_OFFSET_F( 27 )
  #define PPC_CONTEXT_OFFSET_F28 PPC_CONTEXT_OFFSET_F( 28 )
  #define PPC_CONTEXT_OFFSET_F29 PPC_CONTEXT_OFFSET_F( 29 )
  #define PPC_CONTEXT_OFFSET_F30 PPC_CONTEXT_OFFSET_F( 30 )
  #define PPC_CONTEXT_OFFSET_F31 PPC_CONTEXT_OFFSET_F( 31 )
#endif

#if defined(PPC_MULTILIB_FPU)
  #define PPC_CONTEXT_VOLATILE_SIZE PPC_CONTEXT_OFFSET_F( 32 )
#elif defined(PPC_MULTILIB_ALTIVEC)
  #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_OFFSET_VRSAVE + 4)
#else
  #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_GPR_OFFSET( 32 ) + 4)
#endif

#ifdef RTEMS_SMP
  #define PPC_CONTEXT_OFFSET_IS_EXECUTING PPC_CONTEXT_VOLATILE_SIZE
#endif

#ifndef ASM
typedef struct {
#if (PPC_HAS_FPU == 1)
    /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
     * procedure calls.  However, this would mean that the interrupt
     * frame had to hold f0-f13, and the fpscr.  And as the majority
     * of tasks will not have an FP context, we will save the whole
     * context here.
     */
#if (PPC_HAS_DOUBLE == 1)
    double      f[32];
    uint64_t    fpscr;
#else
    float       f[32];
    uint32_t    fpscr;
#endif
#endif /* (PPC_HAS_FPU == 1) */
} Context_Control_fp;

typedef struct CPU_Interrupt_frame {
    uint32_t   stacklink;   /* Ensure this is a real frame (also reg1 save) */
    uint32_t   calleeLr;    /* link register used by callees: SVR4/EABI */

    /* This is what is left out of the primary contexts */
    uint32_t   gpr0;
    uint32_t   gpr2;        /* play safe */
    uint32_t   gpr3;
    uint32_t   gpr4;
    uint32_t   gpr5;
    uint32_t   gpr6;
    uint32_t   gpr7;
    uint32_t   gpr8;
    uint32_t   gpr9;
    uint32_t   gpr10;
    uint32_t   gpr11;
    uint32_t   gpr12;
    uint32_t   gpr13;       /* Play safe */
    uint32_t   gpr28;       /* For internal use by the IRQ handler */
    uint32_t   gpr29;       /* For internal use by the IRQ handler */
    uint32_t   gpr30;       /* For internal use by the IRQ handler */
    uint32_t   gpr31;       /* For internal use by the IRQ handler */
    uint32_t   cr;          /* Bits of this are volatile, so no-one may save */
    uint32_t   ctr;
    uint32_t   xer;
    uint32_t   lr;
    uint32_t   pc;
    uint32_t   msr;
    uint32_t   pad[3];
} CPU_Interrupt_frame;

#endif /* ASM */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table and manages it
 *  internally.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  PowerPC Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then no interrupt stack memory is allocated by RTEMS.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE.
 */

#define CPU_ALLOCATE_INTERRUPT_STACK TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another, different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 *
 *  Note, however, that compilers may use floating point
 *  registers/instructions for optimization, or they may save/restore
 *  FP registers on the stack.  You must not use deferred switching in
 *  these cases, and on the PowerPC attempting to do so will raise a
 *  "FP unavailable" exception.
 */
/*
 *  ACB Note:  This could make debugging tricky..
 */

/* conservative setting (FALSE); probably doesn't affect performance too much */
#define CPU_USE_DEFERRED_FP_SWITCH FALSE

/*
 *  Processor defined structures required for cpukit/score.
 */

#ifndef ASM

/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 */

/* EXTERN Context_Control_fp _CPU_Null_fp_context; */

#endif /* ndef ASM */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/*
 * (Optional) # of bytes for libmisc/stackchk to check
 * If not specified, then it defaults to something reasonable
 * for most architectures.
 */

#define CPU_STACK_CHECK_SIZE (128)

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.  Note that
 *  this is not an option - RTEMS/score _relies_ on _ISR_Nest_level
 *  being maintained (e.g. watchdog queues).
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  ISR handler macros
 */

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _isr_cookie.
 */

#ifndef ASM

static inline uint32_t _CPU_ISR_Get_level( void )
{
  register unsigned int msr;
  _CPU_MSR_GET(msr);
  if (msr & MSR_EE) return 0;
  else return 1;
}

static inline void _CPU_ISR_Set_level( uint32_t level )
{
  register unsigned int msr;
  _CPU_MSR_GET(msr);
  if (!(level & CPU_MODES_INTERRUPT_MASK)) {
    msr |= ppc_interrupt_get_disable_mask();
  }
  else {
    msr &= ~ppc_interrupt_get_disable_mask();
  }
  _CPU_MSR_SET(msr);
}

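/*
 * Usage sketch (illustrative, not part of the original header): a simple
 * critical section built on the two helpers above.  _CPU_MSR_GET and
 * _CPU_MSR_SET come from the registers header.
 *
 *   uint32_t level = _CPU_ISR_Get_level();
 *   _CPU_ISR_Set_level( 1 );       (clear MSR_EE: external interrupts off)
 *   ... critical section ...
 *   _CPU_ISR_Set_level( level );   (restore the previous level)
 */
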
void BSP_panic(char *);

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 */

void _BSP_Fatal_error(unsigned int);

#endif /* ASM */

#define _CPU_Fatal_halt( _source, _error ) \
  _BSP_Fatal_error(_error)

/* end of Fatal Error manager macros */

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 */

#define CPU_STACK_MINIMUM_SIZE (1024*8)

#define CPU_SIZEOF_POINTER 4

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 */

#define CPU_ALIGNMENT (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
 */

#define CPU_STACK_ALIGNMENT (PPC_STACK_ALIGNMENT)

#ifndef ASM
/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   swapped;

  __asm__ volatile("rlwimi %0,%1,8,24,31;"
                   "rlwimi %0,%1,24,16,23;"
                   "rlwimi %0,%1,8,8,15;"
                   "rlwimi %0,%1,24,0,7;" :
                   "=&r" ((swapped)) : "r" ((value)));

  return( swapped );
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))

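/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *   uint32_t little = CPU_swap_u32( 0x12345678 );   (yields 0x78563412)
 *   uint16_t half   = CPU_swap_u16( 0xABCD );       (yields 0xCDAB)
 *
 * The four rlwimi instructions rotate the source word and insert the
 * selected byte fields into the result, the classic PowerPC byte
 * reversal sequence.
 */
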
typedef uint32_t CPU_Counter_ticks;

static inline CPU_Counter_ticks _CPU_Counter_read( void )
{
  CPU_Counter_ticks value;

#if defined(__PPC_CPU_E6500__)
  /* Use Alternate Time Base */
  __asm__ volatile( "mfspr %0, 526" : "=r" (value) );
#else
  __asm__ volatile( "mfspr %0, 268" : "=r" (value) );
#endif

  return value;
}

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}

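/*
 * Note (illustrative, not part of the original header): the unsigned
 * subtraction above is wrap-around safe.  If the 32-bit time base rolls
 * over between the two samples, e.g. first == 0xFFFFFFF0 and
 * second == 0x10, then second - first == 0x20, which is still the
 * correct number of elapsed ticks.
 */
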
#endif /* ASM */


#ifndef ASM
/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 */

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  uint32_t         *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
);

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  The code included for this macro assumes
 *  that this is a CPU in which an "initial" FP context was saved into
 *  _CPU_Null_fp_context and it simply copies it to the destination
 *  context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  memset( *(_destination), 0, sizeof( **(_destination) ) )

/* end of Context handler macros */
#endif /* ASM */

#ifndef ASM
/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_bit_map_Word.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variables in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), (4) are handled by the macros _CPU_Priority_mask() and
 *  _CPU_Priority_Bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into a major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *    bit set
 */

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    __asm__ volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
                      "1" ((_value))); \
  }

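/*
 * Worked example (illustrative, not part of the original header): cntlzw
 * counts leading zeros in a 32-bit word, so for _value == 0x8000 (bit 15
 * of the 16-bit priority map) the macro stores 16 in _output, and for
 * _value == 0x0001 it stores 31.  _CPU_Priority_Mask() below inverts the
 * mapping: 0x80000000 >> 16 == 0x8000 recovers the original bit.
 */
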
/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 0x80000000 >> (_bit_number) )

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

/* end of Priority handler macros */
#endif /* ASM */


/* functions */

#ifndef ASM

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t pir;

    /* Use Book E Processor ID Register (PIR) */
    __asm__ volatile (
      "mfspr %[pir], 286"
      : [pir] "=&r" (pir)
    );

    return pir;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

typedef struct {
  uint32_t EXC_SRR0;
  uint32_t EXC_SRR1;
  uint32_t _EXC_number;
  uint32_t EXC_CR;
  uint32_t EXC_CTR;
  uint32_t EXC_XER;
  uint32_t EXC_LR;
  #ifdef __SPE__
    uint32_t EXC_SPEFSCR;
    uint64_t EXC_ACC;
  #endif
  PPC_GPR_TYPE GPR0;
  PPC_GPR_TYPE GPR1;
  PPC_GPR_TYPE GPR2;
  PPC_GPR_TYPE GPR3;
  PPC_GPR_TYPE GPR4;
  PPC_GPR_TYPE GPR5;
  PPC_GPR_TYPE GPR6;
  PPC_GPR_TYPE GPR7;
  PPC_GPR_TYPE GPR8;
  PPC_GPR_TYPE GPR9;
  PPC_GPR_TYPE GPR10;
  PPC_GPR_TYPE GPR11;
  PPC_GPR_TYPE GPR12;
  PPC_GPR_TYPE GPR13;
  PPC_GPR_TYPE GPR14;
  PPC_GPR_TYPE GPR15;
  PPC_GPR_TYPE GPR16;
  PPC_GPR_TYPE GPR17;
  PPC_GPR_TYPE GPR18;
  PPC_GPR_TYPE GPR19;
  PPC_GPR_TYPE GPR20;
  PPC_GPR_TYPE GPR21;
  PPC_GPR_TYPE GPR22;
  PPC_GPR_TYPE GPR23;
  PPC_GPR_TYPE GPR24;
  PPC_GPR_TYPE GPR25;
  PPC_GPR_TYPE GPR26;
  PPC_GPR_TYPE GPR27;
  PPC_GPR_TYPE GPR28;
  PPC_GPR_TYPE GPR29;
  PPC_GPR_TYPE GPR30;
  PPC_GPR_TYPE GPR31;
  #if defined(PPC_MULTILIB_ALTIVEC) || defined(PPC_MULTILIB_FPU)
    uint32_t reserved_for_alignment;
  #endif
  #ifdef PPC_MULTILIB_ALTIVEC
    uint32_t VRSAVE;

    /* This field must take stvewx/lvewx requirements into account */
    uint32_t VSCR;

    uint8_t V0[16];
    uint8_t V1[16];
    uint8_t V2[16];
    uint8_t V3[16];
    uint8_t V4[16];
    uint8_t V5[16];
    uint8_t V6[16];
    uint8_t V7[16];
    uint8_t V8[16];
    uint8_t V9[16];
    uint8_t V10[16];
    uint8_t V11[16];
    uint8_t V12[16];
    uint8_t V13[16];
    uint8_t V14[16];
    uint8_t V15[16];
    uint8_t V16[16];
    uint8_t V17[16];
    uint8_t V18[16];
    uint8_t V19[16];
    uint8_t V20[16];
    uint8_t V21[16];
    uint8_t V22[16];
    uint8_t V23[16];
    uint8_t V24[16];
    uint8_t V25[16];
    uint8_t V26[16];
    uint8_t V27[16];
    uint8_t V28[16];
    uint8_t V29[16];
    uint8_t V30[16];
    uint8_t V31[16];
  #endif
  #ifdef PPC_MULTILIB_FPU
    double F0;
    double F1;
    double F2;
    double F3;
    double F4;
    double F5;
    double F6;
    double F7;
    double F8;
    double F9;
    double F10;
    double F11;
    double F12;
    double F13;
    double F14;
    double F15;
    double F16;
    double F17;
    double F18;
    double F19;
    double F20;
    double F21;
    double F22;
    double F23;
    double F24;
    double F25;
    double F26;
    double F27;
    double F28;
    double F29;
    double F30;
    double F31;
    uint64_t FPSCR;
  #endif
} CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/*
 * _CPU_Initialize_altivec()
 *
 * Global altivec-related initialization.
 */
void
_CPU_Initialize_altivec(void);

/*
 * _CPU_Context_switch_altivec
 *
 * This routine switches the altivec contexts passed to it.
 */

void
_CPU_Context_switch_altivec(
  ppc_context *from,
  ppc_context *to
);

/*
 * _CPU_Context_restore_altivec
 *
 * This routine restores the altivec context passed to it.
 */

void
_CPU_Context_restore_altivec(
  ppc_context *ctxt
);

/*
 * _CPU_Context_initialize_altivec
 *
 * This routine initializes the altivec context passed to it.
 */

void
_CPU_Context_initialize_altivec(
  ppc_context *ctxt
);

void _CPU_Fatal_error(
  uint32_t _error
);

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif /* _RTEMS_SCORE_CPU_H */