RTEMS 5.2
cpu.h
1
9/*
10 * COPYRIGHT (c) 1989-2012.
11 * On-Line Applications Research Corporation (OAR).
12 *
13 * COPYRIGHT (c) 1995 i-cubed ltd.
14 *
15 * To anyone who acknowledges that this file is provided "AS IS"
16 * without any express or implied warranty:
17 * permission to use, copy, modify, and distribute this file
18 * for any purpose is hereby granted without fee, provided that
19 * the above copyright notice and this notice appears in all
20 * copies, and that the name of i-cubed limited not be used in
21 * advertising or publicity pertaining to distribution of the
22 * software without specific, written prior permission.
23 * i-cubed limited makes no representations about the suitability
24 * of this software for any purpose.
25 *
26 * Copyright (c) 2001 Andy Dachs <a.dachs@sstl.co.uk>.
27 *
28 * Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL).
29 *
30 * Copyright (c) 2010, 2017 embedded brains GmbH.
31 *
32 * The license and distribution terms for this file may be
33 * found in the file LICENSE in this distribution or at
34 * http://www.rtems.org/license/LICENSE.
35 */
36
37#ifndef _RTEMS_SCORE_CPU_H
38#define _RTEMS_SCORE_CPU_H
39
41#if defined(RTEMS_PARAVIRT)
42#include <rtems/score/paravirt.h>
43#endif
44#include <rtems/score/powerpc.h>
46
47#ifndef ASM
48 #include <string.h> /* for memset() */
49#endif
50
51#ifdef __cplusplus
52extern "C" {
53#endif
54
55/* conditional compilation parameters */
56
57/*
58 * Does the stack grow up (toward higher addresses) or down
59 * (toward lower addresses)?
60 *
 61 * If TRUE, then the stack grows upward.
 62 * If FALSE, then the stack grows toward smaller addresses.
63 */
64
65#define CPU_STACK_GROWS_UP FALSE
66
67#define CPU_CACHE_LINE_BYTES PPC_STRUCTURE_ALIGNMENT
68
69#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
70
71/*
72 * Does the CPU have hardware floating point?
73 *
74 * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
75 * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
76 *
77 * If there is a FP coprocessor such as the i387 or mc68881, then
78 * the answer is TRUE.
79 *
80 * The macro name "PPC_HAS_FPU" should be made CPU specific.
81 * It indicates whether or not this CPU model has FP support. For
82 * example, it would be possible to have an i386_nofp CPU model
83 * which set this to false to indicate that you have an i386 without
84 * an i387 and wish to leave floating point support out of RTEMS.
85 */
86
87#if ( PPC_HAS_FPU == 1 )
88#define CPU_HARDWARE_FP TRUE
89#define CPU_SOFTWARE_FP FALSE
90#else
91#define CPU_HARDWARE_FP FALSE
92#define CPU_SOFTWARE_FP FALSE
93#endif
94
95/*
96 * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
97 *
98 * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
99 * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
100 *
101 * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
102 *
 103 * PowerPC Note: It appears that GCC can implicitly generate FPU
104 * and Altivec instructions when you least expect them. So make
105 * all tasks floating point.
106 */
107
108#define CPU_ALL_TASKS_ARE_FP CPU_HARDWARE_FP
109
110/*
111 * Should the IDLE task have a floating point context?
112 *
113 * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
114 * and it has a floating point context which is switched in and out.
115 * If FALSE, then the IDLE task does not have a floating point context.
116 *
117 * Setting this to TRUE negatively impacts the time required to preempt
118 * the IDLE task from an interrupt because the floating point context
119 * must be saved as part of the preemption.
120 */
121
122#define CPU_IDLE_TASK_IS_FP FALSE
123
124#define CPU_MAXIMUM_PROCESSORS 32
125
126/*
127 * Processor defined structures required for cpukit/score.
128 */
129
130/*
131 * Contexts
132 *
133 * Generally there are 2 types of context to save.
134 * 1. Interrupt registers to save
135 * 2. Task level registers to save
136 *
137 * This means we have the following 3 context items:
138 * 1. task level context stuff:: Context_Control
139 * 2. floating point task stuff:: Context_Control_fp
140 * 3. special interrupt level context :: Context_Control_interrupt
141 *
142 * On some processors, it is cost-effective to save only the callee
143 * preserved registers during a task context switch. This means
144 * that the ISR code needs to save those registers which do not
145 * persist across function calls. It is not mandatory to make this
 146 * distinction between the caller/callee saved registers for the
147 * purpose of minimizing context saved during task switch and on interrupts.
148 * If the cost of saving extra registers is minimal, simplicity is the
149 * choice. Save the same context on interrupt entry as for tasks in
150 * this case.
151 *
152 * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
153 * care should be used in designing the context area.
154 *
155 * On some CPUs with hardware floating point support, the Context_Control_fp
 156 * structure will not be used, or it simply consists of an array of a
157 * fixed number of bytes. This is done when the floating point context
158 * is dumped by a "FP save context" type instruction and the format
159 * is not really defined by the CPU. In this case, there is no need
160 * to figure out the exact format -- only the size. Of course, although
161 * this is enough information for RTEMS, it is probably not enough for
162 * a debugger such as gdb. But that is another problem.
163 */
164
165#ifndef __SPE__
166 #define PPC_GPR_TYPE uintptr_t
167 #if defined(__powerpc64__)
168 #define PPC_GPR_SIZE 8
169 #define PPC_GPR_LOAD ld
170 #define PPC_GPR_STORE std
171 #else
172 #define PPC_GPR_SIZE 4
173 #define PPC_GPR_LOAD lwz
174 #define PPC_GPR_STORE stw
175 #endif
176#else
177 #define PPC_GPR_TYPE uint64_t
178 #define PPC_GPR_SIZE 8
179 #define PPC_GPR_LOAD evldd
180 #define PPC_GPR_STORE evstdd
181#endif
182
183#if defined(__powerpc64__)
184 #define PPC_REG_SIZE 8
185 #define PPC_REG_LOAD ld
186 #define PPC_REG_STORE std
187 #define PPC_REG_STORE_UPDATE stdu
188 #define PPC_REG_CMP cmpd
189#else
190 #define PPC_REG_SIZE 4
191 #define PPC_REG_LOAD lwz
192 #define PPC_REG_STORE stw
193 #define PPC_REG_STORE_UPDATE stwu
194 #define PPC_REG_CMP cmpw
195#endif
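/*
 * The PPC_GPR_* and PPC_REG_* macros above select the register slot size
 * and the load/store/compare mnemonics (lwz/stw, ld/std, or evldd/evstdd
 * for SPE) used by the assembly language context switch and interrupt
 * code, so the same sources work for the 32-bit, 64-bit and SPE multilibs.
 */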
196
197#ifndef ASM
198
199/*
200 * Non-volatile context according to E500ABIUG, EABI and 32-bit TLS (according
201 * to "Power Architecture 32-bit Application Binary Interface Supplement 1.0 -
202 * Linux and Embedded")
203 */
204typedef struct {
205 uint32_t msr;
206 uint32_t cr;
207 uintptr_t gpr1;
208 uintptr_t lr;
209 PPC_GPR_TYPE gpr14;
210 PPC_GPR_TYPE gpr15;
211 PPC_GPR_TYPE gpr16;
212 PPC_GPR_TYPE gpr17;
213 PPC_GPR_TYPE gpr18;
214 PPC_GPR_TYPE gpr19;
215 PPC_GPR_TYPE gpr20;
216 PPC_GPR_TYPE gpr21;
217 PPC_GPR_TYPE gpr22;
218 PPC_GPR_TYPE gpr23;
219 PPC_GPR_TYPE gpr24;
220 PPC_GPR_TYPE gpr25;
221 PPC_GPR_TYPE gpr26;
222 PPC_GPR_TYPE gpr27;
223 PPC_GPR_TYPE gpr28;
224 PPC_GPR_TYPE gpr29;
225 PPC_GPR_TYPE gpr30;
226 PPC_GPR_TYPE gpr31;
227 uint32_t isr_dispatch_disable;
228 uint32_t reserved_for_alignment;
229 #if defined(PPC_MULTILIB_ALTIVEC)
230 uint8_t v20[16];
231 uint8_t v21[16];
232 uint8_t v22[16];
233 uint8_t v23[16];
234 uint8_t v24[16];
235 uint8_t v25[16];
236 uint8_t v26[16];
237 uint8_t v27[16];
238 uint8_t v28[16];
239 uint8_t v29[16];
240 uint8_t v30[16];
241 uint8_t v31[16];
242 uint32_t vrsave;
243 #elif defined(__ALTIVEC__)
244 /*
245 * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
246 * and padding to ensure cache-alignment. Unfortunately, we can't verify
247 * the cache line size here in the cpukit but altivec support code will
248 * produce an error if this is ever different from 32 bytes.
249 *
250 * Note: it is the BSP/CPU-support's responsibility to save/restore
251 * volatile vregs across interrupts and exceptions.
252 */
253 uint8_t altivec[16*12 + 32 + PPC_DEFAULT_CACHE_LINE_SIZE];
254 #endif
255 #if defined(PPC_MULTILIB_FPU)
256 double f14;
257 double f15;
258 double f16;
259 double f17;
260 double f18;
261 double f19;
262 double f20;
263 double f21;
264 double f22;
265 double f23;
266 double f24;
267 double f25;
268 double f26;
269 double f27;
270 double f28;
271 double f29;
272 double f30;
273 double f31;
274 #endif
275 /*
276 * The following items are at the structure end, so that we can use dcbz for
277 * the previous items to optimize the context switch. We must not set the
278 * following items to zero via the dcbz.
279 */
280 uintptr_t tp;
281 #if defined(RTEMS_SMP)
282 volatile uint32_t is_executing;
283 #endif
 284} ppc_context;
 285
286typedef struct {
287 uint8_t context [
288 PPC_DEFAULT_CACHE_LINE_SIZE
289 + sizeof(ppc_context)
290 + (sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE == 0
291 ? 0
292 : PPC_DEFAULT_CACHE_LINE_SIZE
293 - sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE)
294 ];
 295} Context_Control;
 296
297static inline ppc_context *ppc_get_context( const Context_Control *context )
298{
299 uintptr_t clsz = PPC_DEFAULT_CACHE_LINE_SIZE;
300 uintptr_t mask = clsz - 1;
301 uintptr_t addr = (uintptr_t) context;
302
303 return (ppc_context *) ((addr & ~mask) + clsz);
304}
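/*
 * Illustrative example of the alignment math above: with
 * PPC_DEFAULT_CACHE_LINE_SIZE == 32 the mask is 0x1f, so for a
 * Context_Control at address 0x00010004
 *
 *   (0x00010004 & ~0x1f) + 32 == 0x00010020
 *
 * which is the first cache line boundary above the start of the context
 * area (one full line above it if it was already aligned).  The extra
 * cache line and the round-up of sizeof(ppc_context) in the
 * Context_Control definition guarantee that the ppc_context obtained this
 * way lies entirely inside the context array.
 */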
305
306#define _CPU_Context_Get_SP( _context ) \
307 ppc_get_context(_context)->gpr1
308
309#ifdef RTEMS_SMP
310 static inline bool _CPU_Context_Get_is_executing(
 311 const Context_Control *context
 312 )
313 {
314 return ppc_get_context(context)->is_executing;
315 }
316
317 static inline void _CPU_Context_Set_is_executing(
 318 Context_Control *context,
 319 bool is_executing
320 )
321 {
322 ppc_get_context(context)->is_executing = is_executing;
323 }
324#endif
325#endif /* ASM */
326
327#define PPC_CONTEXT_OFFSET_MSR (PPC_DEFAULT_CACHE_LINE_SIZE)
328#define PPC_CONTEXT_OFFSET_CR (PPC_DEFAULT_CACHE_LINE_SIZE + 4)
329#define PPC_CONTEXT_OFFSET_GPR1 (PPC_DEFAULT_CACHE_LINE_SIZE + 8)
330#define PPC_CONTEXT_OFFSET_LR (PPC_DEFAULT_CACHE_LINE_SIZE + PPC_REG_SIZE + 8)
331
332#define PPC_CONTEXT_GPR_OFFSET( gpr ) \
333 (((gpr) - 14) * PPC_GPR_SIZE + \
334 PPC_DEFAULT_CACHE_LINE_SIZE + 8 + 2 * PPC_REG_SIZE)
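/*
 * Worked example: for a classic 32-bit multilib with
 * PPC_DEFAULT_CACHE_LINE_SIZE == 32, PPC_REG_SIZE == 4 and
 * PPC_GPR_SIZE == 4 this yields
 *
 *   PPC_CONTEXT_OFFSET_GPR14 == (14 - 14) * 4 + 32 + 8 + 2 * 4 == 48
 *   PPC_CONTEXT_OFFSET_GPR31 == (31 - 14) * 4 + 48             == 116
 *
 * which matches the ppc_context layout: msr at offset 32, cr at 36,
 * gpr1 at 40, lr at 44, gpr14 at 48.  All offsets assume that the
 * Context_Control itself is cache line aligned.
 */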
335
336#define PPC_CONTEXT_OFFSET_GPR14 PPC_CONTEXT_GPR_OFFSET( 14 )
337#define PPC_CONTEXT_OFFSET_GPR15 PPC_CONTEXT_GPR_OFFSET( 15 )
338#define PPC_CONTEXT_OFFSET_GPR16 PPC_CONTEXT_GPR_OFFSET( 16 )
339#define PPC_CONTEXT_OFFSET_GPR17 PPC_CONTEXT_GPR_OFFSET( 17 )
340#define PPC_CONTEXT_OFFSET_GPR18 PPC_CONTEXT_GPR_OFFSET( 18 )
341#define PPC_CONTEXT_OFFSET_GPR19 PPC_CONTEXT_GPR_OFFSET( 19 )
342#define PPC_CONTEXT_OFFSET_GPR20 PPC_CONTEXT_GPR_OFFSET( 20 )
343#define PPC_CONTEXT_OFFSET_GPR21 PPC_CONTEXT_GPR_OFFSET( 21 )
344#define PPC_CONTEXT_OFFSET_GPR22 PPC_CONTEXT_GPR_OFFSET( 22 )
345#define PPC_CONTEXT_OFFSET_GPR23 PPC_CONTEXT_GPR_OFFSET( 23 )
346#define PPC_CONTEXT_OFFSET_GPR24 PPC_CONTEXT_GPR_OFFSET( 24 )
347#define PPC_CONTEXT_OFFSET_GPR25 PPC_CONTEXT_GPR_OFFSET( 25 )
348#define PPC_CONTEXT_OFFSET_GPR26 PPC_CONTEXT_GPR_OFFSET( 26 )
349#define PPC_CONTEXT_OFFSET_GPR27 PPC_CONTEXT_GPR_OFFSET( 27 )
350#define PPC_CONTEXT_OFFSET_GPR28 PPC_CONTEXT_GPR_OFFSET( 28 )
351#define PPC_CONTEXT_OFFSET_GPR29 PPC_CONTEXT_GPR_OFFSET( 29 )
352#define PPC_CONTEXT_OFFSET_GPR30 PPC_CONTEXT_GPR_OFFSET( 30 )
353#define PPC_CONTEXT_OFFSET_GPR31 PPC_CONTEXT_GPR_OFFSET( 31 )
354#define PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE PPC_CONTEXT_GPR_OFFSET( 32 )
355
356#ifdef PPC_MULTILIB_ALTIVEC
357 #define PPC_CONTEXT_OFFSET_V( v ) \
358 ( ( ( v ) - 20 ) * 16 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8)
359 #define PPC_CONTEXT_OFFSET_V20 PPC_CONTEXT_OFFSET_V( 20 )
360 #define PPC_CONTEXT_OFFSET_V21 PPC_CONTEXT_OFFSET_V( 21 )
361 #define PPC_CONTEXT_OFFSET_V22 PPC_CONTEXT_OFFSET_V( 22 )
362 #define PPC_CONTEXT_OFFSET_V23 PPC_CONTEXT_OFFSET_V( 23 )
363 #define PPC_CONTEXT_OFFSET_V24 PPC_CONTEXT_OFFSET_V( 24 )
364 #define PPC_CONTEXT_OFFSET_V25 PPC_CONTEXT_OFFSET_V( 25 )
365 #define PPC_CONTEXT_OFFSET_V26 PPC_CONTEXT_OFFSET_V( 26 )
366 #define PPC_CONTEXT_OFFSET_V27 PPC_CONTEXT_OFFSET_V( 27 )
367 #define PPC_CONTEXT_OFFSET_V28 PPC_CONTEXT_OFFSET_V( 28 )
368 #define PPC_CONTEXT_OFFSET_V29 PPC_CONTEXT_OFFSET_V( 29 )
369 #define PPC_CONTEXT_OFFSET_V30 PPC_CONTEXT_OFFSET_V( 30 )
370 #define PPC_CONTEXT_OFFSET_V31 PPC_CONTEXT_OFFSET_V( 31 )
371 #define PPC_CONTEXT_OFFSET_VRSAVE PPC_CONTEXT_OFFSET_V( 32 )
372 #define PPC_CONTEXT_OFFSET_F( f ) \
373 ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_VRSAVE + 8 )
374#else
375 #define PPC_CONTEXT_OFFSET_F( f ) \
376 ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8 )
377#endif
378
379#ifdef PPC_MULTILIB_FPU
380 #define PPC_CONTEXT_OFFSET_F14 PPC_CONTEXT_OFFSET_F( 14 )
381 #define PPC_CONTEXT_OFFSET_F15 PPC_CONTEXT_OFFSET_F( 15 )
382 #define PPC_CONTEXT_OFFSET_F16 PPC_CONTEXT_OFFSET_F( 16 )
383 #define PPC_CONTEXT_OFFSET_F17 PPC_CONTEXT_OFFSET_F( 17 )
384 #define PPC_CONTEXT_OFFSET_F18 PPC_CONTEXT_OFFSET_F( 18 )
385 #define PPC_CONTEXT_OFFSET_F19 PPC_CONTEXT_OFFSET_F( 19 )
386 #define PPC_CONTEXT_OFFSET_F20 PPC_CONTEXT_OFFSET_F( 20 )
387 #define PPC_CONTEXT_OFFSET_F21 PPC_CONTEXT_OFFSET_F( 21 )
388 #define PPC_CONTEXT_OFFSET_F22 PPC_CONTEXT_OFFSET_F( 22 )
389 #define PPC_CONTEXT_OFFSET_F23 PPC_CONTEXT_OFFSET_F( 23 )
390 #define PPC_CONTEXT_OFFSET_F24 PPC_CONTEXT_OFFSET_F( 24 )
391 #define PPC_CONTEXT_OFFSET_F25 PPC_CONTEXT_OFFSET_F( 25 )
392 #define PPC_CONTEXT_OFFSET_F26 PPC_CONTEXT_OFFSET_F( 26 )
393 #define PPC_CONTEXT_OFFSET_F27 PPC_CONTEXT_OFFSET_F( 27 )
394 #define PPC_CONTEXT_OFFSET_F28 PPC_CONTEXT_OFFSET_F( 28 )
395 #define PPC_CONTEXT_OFFSET_F29 PPC_CONTEXT_OFFSET_F( 29 )
396 #define PPC_CONTEXT_OFFSET_F30 PPC_CONTEXT_OFFSET_F( 30 )
397 #define PPC_CONTEXT_OFFSET_F31 PPC_CONTEXT_OFFSET_F( 31 )
398#endif
399
400#if defined(PPC_MULTILIB_FPU)
401 #define PPC_CONTEXT_VOLATILE_SIZE PPC_CONTEXT_OFFSET_F( 32 )
402#elif defined(PPC_MULTILIB_ALTIVEC)
403 #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_OFFSET_VRSAVE + 4)
404#elif defined(__ALTIVEC__)
405 #define PPC_CONTEXT_VOLATILE_SIZE \
406 (PPC_CONTEXT_GPR_OFFSET( 32 ) + 8 \
407 + 16 * 12 + 32 + PPC_DEFAULT_CACHE_LINE_SIZE)
408#else
409 #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_GPR_OFFSET( 32 ) + 8)
410#endif
411
412#define PPC_CONTEXT_OFFSET_TP PPC_CONTEXT_VOLATILE_SIZE
413
414#ifdef RTEMS_SMP
415 #define PPC_CONTEXT_OFFSET_IS_EXECUTING \
416 (PPC_CONTEXT_OFFSET_TP + PPC_REG_SIZE)
417#endif
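/*
 * PPC_CONTEXT_VOLATILE_SIZE marks the end of the register save area that
 * the context switch code may clear with dcbz; the thread pointer (tp)
 * and, on SMP configurations, is_executing are placed behind it so that
 * they are never zeroed (see the comment in ppc_context above).
 */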
418
419#ifndef ASM
420typedef struct {
421#if (PPC_HAS_FPU == 1)
422 /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
423 * procedure calls. However, this would mean that the interrupt
424 * frame had to hold f0-f13, and the fpscr. And as the majority
425 * of tasks will not have an FP context, we will save the whole
426 * context here.
427 */
428#if (PPC_HAS_DOUBLE == 1)
429 double f[32];
430 uint64_t fpscr;
431#else
432 float f[32];
433 uint32_t fpscr;
434#endif
435#endif /* (PPC_HAS_FPU == 1) */
 436} Context_Control_fp;
 437
438#endif /* ASM */
439
440/*
441 * Does the CPU follow the simple vectored interrupt model?
442 *
 443 * If TRUE, then RTEMS allocates and internally manages the vector table.
 444 * If FALSE, then the BSP is assumed to allocate and manage the vector
 445 * table.
446 *
447 * PowerPC Specific Information:
448 *
449 * The PowerPC and x86 were the first to use the PIC interrupt model.
450 * They do not use the simple vectored interrupt model.
451 */
452#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE
453
454/*
 455 * Does RTEMS invoke the user's ISR with the vector number and
456 * a pointer to the saved interrupt frame (1) or just the vector
457 * number (0)?
458 */
459
460#define CPU_ISR_PASSES_FRAME_POINTER FALSE
461
462/*
463 * Should the saving of the floating point registers be deferred
464 * until a context switch is made to another different floating point
465 * task?
466 *
467 * If TRUE, then the floating point context will not be stored until
468 * necessary. It will remain in the floating point registers and not
 469 * disturbed until another floating point task is switched to.
470 *
471 * If FALSE, then the floating point context is saved when a floating
472 * point task is switched out and restored when the next floating point
473 * task is restored. The state of the floating point registers between
474 * those two operations is not specified.
475 *
476 * If the floating point context does NOT have to be saved as part of
477 * interrupt dispatching, then it should be safe to set this to TRUE.
478 *
479 * Setting this flag to TRUE results in using a different algorithm
480 * for deciding when to save and restore the floating point context.
481 * The deferred FP switch algorithm minimizes the number of times
482 * the FP context is saved and restored. The FP context is not saved
483 * until a context switch is made to another, different FP task.
484 * Thus in a system with only one FP task, the FP context will never
485 * be saved or restored.
486 *
487 * Note, however that compilers may use floating point registers/
488 * instructions for optimization or they may save/restore FP registers
489 * on the stack. You must not use deferred switching in these cases
490 * and on the PowerPC attempting to do so will raise a "FP unavailable"
491 * exception.
492 */
493/*
494 * ACB Note: This could make debugging tricky..
495 */
496
497/* conservative setting (FALSE); probably doesn't affect performance too much */
498#define CPU_USE_DEFERRED_FP_SWITCH FALSE
499
500#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE
501
502/*
503 * Processor defined structures required for cpukit/score.
504 */
505
506#ifndef ASM
507
508/*
509 * This variable is optional. It is used on CPUs on which it is difficult
510 * to generate an "uninitialized" FP context. It is filled in by
511 * _CPU_Initialize and copied into the task's FP context area during
512 * _CPU_Context_Initialize.
513 */
514
515/* EXTERN Context_Control_fp _CPU_Null_fp_context; */
516
517#endif /* ndef ASM */
518
519/*
520 * This defines the number of levels and the mask used to pick those
521 * bits out of a thread mode.
522 */
523
524#define CPU_MODES_INTERRUPT_MASK 0x00000001 /* interrupt level in mode */
525
526/*
527 * The size of the floating point context area. On some CPUs this
528 * will not be a "sizeof" because the format of the floating point
529 * area is not defined -- only the size is. This is usually on
530 * CPUs with a "floating point save context" instruction.
531 */
532
533#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
534
535/*
536 * (Optional) # of bytes for libmisc/stackchk to check
 537 * If not specified, then it defaults to something reasonable
538 * for most architectures.
539 */
540
541#define CPU_STACK_CHECK_PATTERN_INITIALIZER \
542 { 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
543 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
544 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
545 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
546 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
547 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
548 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
549 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06 }
550
551/*
552 * Amount of extra stack (above minimum stack size) required by
553 * MPCI receive server thread. Remember that in a multiprocessor
554 * system this thread must exist and be able to process all directives.
555 */
556
557#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
558
559/*
560 * This is defined if the port has a special way to report the ISR nesting
561 * level. Most ports maintain the variable _ISR_Nest_level. Note that
562 * this is not an option - RTEMS/score _relies_ on _ISR_Nest_level
563 * being maintained (e.g. watchdog queues).
564 */
565
566#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
567
568/*
569 * ISR handler macros
570 */
571
572/*
573 * Disable all interrupts for an RTEMS critical section. The previous
574 * level is returned in _isr_cookie.
575 */
576
577#ifndef ASM
578
 579static inline bool _CPU_ISR_Is_enabled( uint32_t level )
 580{
581 return ( level & MSR_EE ) != 0;
582}
583
584#if !defined(PPC_DISABLE_INLINE_ISR_DISABLE_ENABLE)
585
586static inline uint32_t _CPU_ISR_Get_level( void )
587{
588 uint32_t msr;
589 _CPU_MSR_GET(msr);
590 if (msr & MSR_EE) return 0;
591 else return 1;
592}
593
594static inline void _CPU_ISR_Set_level( uint32_t level )
595{
596 uint32_t msr;
597 _CPU_MSR_GET(msr);
598 if (!(level & CPU_MODES_INTERRUPT_MASK)) {
599 msr |= ppc_interrupt_get_disable_mask();
600 }
601 else {
602 msr &= ~ppc_interrupt_get_disable_mask();
603 }
604 _CPU_MSR_SET(msr);
605}
606#else
607/* disable, enable, etc. are in registers.h */
608uint32_t ppc_get_interrupt_level( void );
609void ppc_set_interrupt_level( uint32_t level );
610#define _CPU_ISR_Get_level( _new_level ) ppc_get_interrupt_level()
611#define _CPU_ISR_Set_level( _new_level ) ppc_set_interrupt_level(_new_level)
612#endif
613
614#endif /* ASM */
615
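/*
 * Halt on a fatal error: disable interrupts, leave the fatal source and
 * error code in r3 and r4 (e.g. for inspection with a debugger), and spin
 * in a branch-to-self loop.
 */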
616#define _CPU_Fatal_halt( _source, _error ) \
617 do { \
618 ppc_interrupt_disable(); \
619 __asm__ volatile ( \
620 "mr 3, %0\n" \
621 "mr 4, %1\n" \
622 "1:\n" \
623 "b 1b\n" \
624 : \
625 : "r" (_source), "r" (_error) \
626 : "memory" \
627 ); \
628 } while ( 0 )
629
630/*
631 * Should be large enough to run all RTEMS tests. This ensures
632 * that a "reasonable" small application should not have any problems.
633 */
634
635#define CPU_STACK_MINIMUM_SIZE (1024*8)
636
637#if defined(__powerpc64__)
638#define CPU_SIZEOF_POINTER 8
639#else
640#define CPU_SIZEOF_POINTER 4
641#endif
642
643/*
644 * CPU's worst alignment requirement for data types on a byte boundary. This
645 * alignment does not take into account the requirements for the stack.
646 */
647
648#define CPU_ALIGNMENT (PPC_ALIGNMENT)
649
650/*
651 * This number corresponds to the byte alignment requirement for the
652 * heap handler. This alignment requirement may be stricter than that
653 * for the data types alignment specified by CPU_ALIGNMENT. It is
654 * common for the heap to follow the same alignment requirement as
655 * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
656 * then this should be set to CPU_ALIGNMENT.
657 *
658 * NOTE: This does not have to be a power of 2. It does have to
 659 * be greater than or equal to CPU_ALIGNMENT.
660 */
661
662#define CPU_HEAP_ALIGNMENT (PPC_ALIGNMENT)
663
664/*
665 * This number corresponds to the byte alignment requirement for the
666 * stack. This alignment requirement may be stricter than that for the
667 * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
668 * is strict enough for the stack, then this should be set to 0.
669 *
 670 * NOTE: This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
671 */
672
673#define CPU_STACK_ALIGNMENT (PPC_STACK_ALIGNMENT)
674
675#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES
676
677#ifndef ASM
678/* The following routine swaps the endian format of an unsigned int.
679 * It must be static because it is referenced indirectly.
680 *
681 * This version will work on any processor, but if there is a better
682 * way for your CPU PLEASE use it. The most common way to do this is to:
683 *
684 * swap least significant two bytes with 16-bit rotate
685 * swap upper and lower 16-bits
686 * swap most significant two bytes with 16-bit rotate
687 *
688 * Some CPUs have special instructions which swap a 32-bit quantity in
689 * a single instruction (e.g. i486). It is probably best to avoid
690 * an "endian swapping control bit" in the CPU. One good reason is
691 * that interrupts would probably have to be disabled to ensure that
692 * an interrupt does not try to access the same "chunk" with the wrong
693 * endian. Another good reason is that on some CPUs, the endian bit
694 * endianness for ALL fetches -- both code and data -- so the code
695 * will be fetched incorrectly.
696 */
697
698static inline uint32_t CPU_swap_u32(
699 uint32_t value
700)
701{
702 uint32_t swapped;
703
704 __asm__ volatile("rlwimi %0,%1,8,24,31;"
705 "rlwimi %0,%1,24,16,23;"
706 "rlwimi %0,%1,8,8,15;"
707 "rlwimi %0,%1,24,0,7;" :
708 "=&r" ((swapped)) : "r" ((value)));
709
710 return( swapped );
711}
712
713#define CPU_swap_u16( value ) \
714 (((value&0xff) << 8) | ((value >> 8)&0xff))
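/*
 * Worked example: CPU_swap_u32( 0x12345678 ) returns 0x78563412.  Each
 * rlwimi rotates the source value and inserts exactly one byte into its
 * mirrored position in the result, so the four inserts together reverse
 * the byte order.  Likewise, CPU_swap_u16( 0x1234 ) returns 0x3412.
 */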
715
716typedef uint32_t CPU_Counter_ticks;
717
718uint32_t _CPU_Counter_frequency( void );
719
720static inline CPU_Counter_ticks _CPU_Counter_read( void )
721{
722 CPU_Counter_ticks value;
723
724#if defined(__PPC_CPU_E6500__)
725 /* Use Alternate Time Base */
726 __asm__ volatile( "mfspr %0, 526" : "=r" (value) );
727#elif defined(mpc860)
728 __asm__ volatile( "mftb %0" : "=r" (value) );
729#else
730 __asm__ volatile( "mfspr %0, 268" : "=r" (value) );
731#endif
732
733 return value;
734}
735
736static inline CPU_Counter_ticks _CPU_Counter_difference(
737 CPU_Counter_ticks second,
738 CPU_Counter_ticks first
739)
740{
741 return second - first;
742}
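/*
 * Usage sketch (illustrative only, not part of the original header):
 * measuring an elapsed interval with the free-running CPU counter.  SPR
 * 268 is the time base (TB) register; SPR 526 is the alternate time base
 * counter available on e6500 cores.
 *
 *   CPU_Counter_ticks begin = _CPU_Counter_read();
 *   do_some_work();                         // hypothetical work function
 *   CPU_Counter_ticks delta =
 *     _CPU_Counter_difference( _CPU_Counter_read(), begin );
 *   // delta / _CPU_Counter_frequency() approximates the time in seconds
 */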
743
744#endif /* ASM */
745
746
747#ifndef ASM
748/* Context handler macros */
749
750/*
751 * Initialize the context to a state suitable for starting a
752 * task after a context restore operation. Generally, this
753 * involves:
754 *
755 * - setting a starting address
756 * - preparing the stack
757 * - preparing the stack and frame pointers
758 * - setting the proper interrupt level in the context
759 * - initializing the floating point context
760 *
761 * This routine generally does not set any unnecessary register
762 * in the context. The state of the "general data" registers is
763 * undefined at task start time.
764 */
765
 766void _CPU_Context_Initialize(
 767 Context_Control *the_context,
768 void *stack_base,
769 size_t size,
770 uint32_t new_level,
771 void *entry_point,
772 bool is_fp,
773 void *tls_area
774);
775
776/*
777 * This routine is responsible for somehow restarting the currently
778 * executing task. If you are lucky, then all that is necessary
779 * is restoring the context. Otherwise, there will need to be
780 * a special assembly routine which does something special in this
781 * case. Context_Restore should work most of the time. It will
782 * not work if restarting self conflicts with the stack frame
783 * assumptions of restoring a context.
784 */
785
786#define _CPU_Context_Restart_self( _the_context ) \
787 _CPU_Context_restore( (_the_context) );
788
789/*
 790 * This routine initializes the FP context area passed to it.
791 * There are a few standard ways in which to initialize the
792 * floating point context. The code included for this macro assumes
 793 * that this is a CPU in which an "initial" FP context was saved into
794 * _CPU_Null_fp_context and it simply copies it to the destination
795 * context passed to it.
796 *
797 * Other models include (1) not doing anything, and (2) putting
798 * a "null FP status word" in the correct place in the FP context.
799 */
800
801#define _CPU_Context_Initialize_fp( _destination ) \
802 memset( *(_destination), 0, sizeof( **(_destination) ) )
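/*
 * On this port the FP context is initialized by clearing it to zero
 * rather than by copying a pre-built _CPU_Null_fp_context.
 */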
803
804/* end of Context handler macros */
805#endif /* ASM */
806
807#ifndef ASM
808/* Bitfield handler macros */
809
810#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
811
812/*
813 * This routine sets _output to the bit number of the first bit
814 * set in _value. _value is of CPU dependent type Priority_bit_map_Word.
815 * This type may be either 16 or 32 bits wide although only the 16
816 * least significant bits will be used.
817 *
 818 * There are a number of variations in using a "find first bit" type
819 * instruction.
820 *
821 * (1) What happens when run on a value of zero?
822 * (2) Bits may be numbered from MSB to LSB or vice-versa.
823 * (3) The numbering may be zero or one based.
824 * (4) The "find first bit" instruction may search from MSB or LSB.
825 *
826 * RTEMS guarantees that (1) will never happen so it is not a concern.
827 * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
828 * _CPU_Priority_Bits_index(). These three form a set of routines
829 * which must logically operate together. Bits in the _value are
830 * set and cleared based on masks built by _CPU_Priority_mask().
831 * The basic major and minor values calculated by _Priority_Major()
832 * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
833 * to properly range between the values returned by the "find first bit"
834 * instruction. This makes it possible for _Priority_Get_highest() to
835 * calculate the major and directly index into the minor table.
836 * This mapping is necessary to ensure that 0 (a high priority major/minor)
837 * is the first bit found.
838 *
839 * This entire "find first bit" and mapping process depends heavily
840 * on the manner in which a priority is broken into a major and minor
841 * components with the major being the 4 MSB of a priority and minor
842 * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
843 * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
844 * to the lowest priority.
845 *
846 * If your CPU does not have a "find first bit" instruction, then
847 * there are ways to make do without it. Here are a handful of ways
848 * to implement this in software:
849 *
850 * - a series of 16 bit test instructions
851 * - a "binary search using if's"
852 * - _number = 0
853 * if _value > 0x00ff
854 * _value >>=8
855 * _number = 8;
856 *
 857 * if _value > 0x000f
 858 * _value >>= 4
859 * _number += 4
860 *
861 * _number += bit_set_table[ _value ]
862 *
863 * where bit_set_table[ 16 ] has values which indicate the first
864 * bit set
865 */
866
867#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
868 { \
869 __asm__ volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
870 "1" ((_value))); \
871 (_output) = (_output) - 16; \
872 }
873
874/* end of Bitfield handler macros */
875
876/*
877 * This routine builds the mask which corresponds to the bit fields
878 * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
879 * for that routine.
880 */
881
882#define _CPU_Priority_Mask( _bit_number ) \
883 ( 0x8000u >> (_bit_number) )
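/*
 * Worked example: for _value == 0x0040 the cntlzw instruction counts 25
 * leading zero bits in the 32-bit register, so _output becomes
 * 25 - 16 == 9, and _CPU_Priority_Mask( 9 ) == 0x8000u >> 9 == 0x0040
 * rebuilds the same bit.  The highest priority bit 0x8000 maps to
 * _output == 0.
 */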
884
885/*
886 * This routine translates the bit numbers returned by
887 * _CPU_Bitfield_Find_first_bit() into something suitable for use as
888 * a major or minor component of a priority. See the discussion
889 * for that routine.
890 */
891
892#define _CPU_Priority_bits_index( _priority ) \
893 (_priority)
894
895/* end of Priority handler macros */
896#endif /* ASM */
897
898/* functions */
899
900#ifndef ASM
901
902/*
903 * _CPU_Initialize
904 *
905 * This routine performs CPU dependent initialization.
906 */
907
908void _CPU_Initialize(void);
909
910void *_CPU_Thread_Idle_body( uintptr_t ignored );
911
912/*
913 * _CPU_Context_switch
914 *
915 * This routine switches from the run context to the heir context.
916 */
917
 918void _CPU_Context_switch(
 919 Context_Control *run,
920 Context_Control *heir
921);
922
923/*
924 * _CPU_Context_restore
925 *
 926 * This routine is generally used only to restart self in an
927 * efficient manner. It may simply be a label in _CPU_Context_switch.
928 *
929 * NOTE: May be unnecessary to reload some registers.
930 */
931
 932void _CPU_Context_restore(
 933 Context_Control *new_context
 934) RTEMS_NO_RETURN;
 935
936/*
937 * _CPU_Context_save_fp
938 *
939 * This routine saves the floating point context passed to it.
940 */
941
 942void _CPU_Context_save_fp(
 943 Context_Control_fp **fp_context_ptr
944);
945
946/*
947 * _CPU_Context_restore_fp
948 *
949 * This routine restores the floating point context passed to it.
950 */
951
 952void _CPU_Context_restore_fp(
 953 Context_Control_fp **fp_context_ptr
954);
955
956#ifdef RTEMS_SMP
957 uint32_t _CPU_SMP_Initialize( void );
958
959 bool _CPU_SMP_Start_processor( uint32_t cpu_index );
960
961 void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
962
963 void _CPU_SMP_Prepare_start_multitasking( void );
964
965 static inline uint32_t _CPU_SMP_Get_current_processor( void )
966 {
967 uint32_t pir;
968
969 /* Use Book E Processor ID Register (PIR) */
970 __asm__ volatile (
971 "mfspr %[pir], 286"
972 : [pir] "=&r" (pir)
973 );
974
975 return pir;
976 }
977
978 void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
979
980 static inline void _CPU_SMP_Processor_event_broadcast( void )
981 {
982 __asm__ volatile ( "" : : : "memory" );
983 }
984
985 static inline void _CPU_SMP_Processor_event_receive( void )
986 {
987 __asm__ volatile ( "" : : : "memory" );
988 }
989#endif
990
991typedef struct {
992 uintptr_t EXC_SRR0;
993 uintptr_t EXC_SRR1;
994 uint32_t _EXC_number;
995 uint32_t RESERVED_FOR_ALIGNMENT_0;
996 uint32_t EXC_CR;
997 uint32_t EXC_XER;
998 uintptr_t EXC_CTR;
999 uintptr_t EXC_LR;
1000 uintptr_t RESERVED_FOR_ALIGNMENT_1;
1001 #ifdef __SPE__
1002 uint32_t EXC_SPEFSCR;
1003 uint64_t EXC_ACC;
1004 #endif
1005 PPC_GPR_TYPE GPR0;
1006 PPC_GPR_TYPE GPR1;
1007 PPC_GPR_TYPE GPR2;
1008 PPC_GPR_TYPE GPR3;
1009 PPC_GPR_TYPE GPR4;
1010 PPC_GPR_TYPE GPR5;
1011 PPC_GPR_TYPE GPR6;
1012 PPC_GPR_TYPE GPR7;
1013 PPC_GPR_TYPE GPR8;
1014 PPC_GPR_TYPE GPR9;
1015 PPC_GPR_TYPE GPR10;
1016 PPC_GPR_TYPE GPR11;
1017 PPC_GPR_TYPE GPR12;
1018 PPC_GPR_TYPE GPR13;
1019 PPC_GPR_TYPE GPR14;
1020 PPC_GPR_TYPE GPR15;
1021 PPC_GPR_TYPE GPR16;
1022 PPC_GPR_TYPE GPR17;
1023 PPC_GPR_TYPE GPR18;
1024 PPC_GPR_TYPE GPR19;
1025 PPC_GPR_TYPE GPR20;
1026 PPC_GPR_TYPE GPR21;
1027 PPC_GPR_TYPE GPR22;
1028 PPC_GPR_TYPE GPR23;
1029 PPC_GPR_TYPE GPR24;
1030 PPC_GPR_TYPE GPR25;
1031 PPC_GPR_TYPE GPR26;
1032 PPC_GPR_TYPE GPR27;
1033 PPC_GPR_TYPE GPR28;
1034 PPC_GPR_TYPE GPR29;
1035 PPC_GPR_TYPE GPR30;
1036 PPC_GPR_TYPE GPR31;
1037 uintptr_t RESERVED_FOR_ALIGNMENT_2;
1038 #ifdef PPC_MULTILIB_ALTIVEC
1039 uint32_t VRSAVE;
1040 uint32_t RESERVED_FOR_ALIGNMENT_3[3];
1041
1042 /* This field must take stvewx/lvewx requirements into account */
1043 uint32_t RESERVED_FOR_ALIGNMENT_4[3];
1044 uint32_t VSCR;
1045
1046 uint8_t V0[16];
1047 uint8_t V1[16];
1048 uint8_t V2[16];
1049 uint8_t V3[16];
1050 uint8_t V4[16];
1051 uint8_t V5[16];
1052 uint8_t V6[16];
1053 uint8_t V7[16];
1054 uint8_t V8[16];
1055 uint8_t V9[16];
1056 uint8_t V10[16];
1057 uint8_t V11[16];
1058 uint8_t V12[16];
1059 uint8_t V13[16];
1060 uint8_t V14[16];
1061 uint8_t V15[16];
1062 uint8_t V16[16];
1063 uint8_t V17[16];
1064 uint8_t V18[16];
1065 uint8_t V19[16];
1066 uint8_t V20[16];
1067 uint8_t V21[16];
1068 uint8_t V22[16];
1069 uint8_t V23[16];
1070 uint8_t V24[16];
1071 uint8_t V25[16];
1072 uint8_t V26[16];
1073 uint8_t V27[16];
1074 uint8_t V28[16];
1075 uint8_t V29[16];
1076 uint8_t V30[16];
1077 uint8_t V31[16];
1078 #endif
1079 #ifdef PPC_MULTILIB_FPU
1080 double F0;
1081 double F1;
1082 double F2;
1083 double F3;
1084 double F4;
1085 double F5;
1086 double F6;
1087 double F7;
1088 double F8;
1089 double F9;
1090 double F10;
1091 double F11;
1092 double F12;
1093 double F13;
1094 double F14;
1095 double F15;
1096 double F16;
1097 double F17;
1098 double F18;
1099 double F19;
1100 double F20;
1101 double F21;
1102 double F22;
1103 double F23;
1104 double F24;
1105 double F25;
1106 double F26;
1107 double F27;
1108 double F28;
1109 double F29;
1110 double F30;
1111 double F31;
1112 uint64_t FPSCR;
1113 uint64_t RESERVED_FOR_ALIGNMENT_5;
1114 #endif
 1115} CPU_Exception_frame;
 1116
1118
1119/*
1120 * _CPU_Initialize_altivec()
1121 *
1122 * Global altivec-related initialization.
1123 */
1124void
1125_CPU_Initialize_altivec(void);
1126
1127/*
1128 * _CPU_Context_switch_altivec
1129 *
1130 * This routine switches the altivec contexts passed to it.
1131 */
1132
1133void
1134_CPU_Context_switch_altivec(
1135 ppc_context *from,
1136 ppc_context *to
1137);
1138
1139/*
1140 * _CPU_Context_restore_altivec
1141 *
1142 * This routine restores the altivec context passed to it.
1143 */
1144
1145void
1146_CPU_Context_restore_altivec(
 1147 ppc_context *ctxt
 1148);
1149
1150/*
1151 * _CPU_Context_initialize_altivec
1152 *
1153 * This routine initializes the altivec context passed to it.
1154 */
1155
1156void
1157_CPU_Context_initialize_altivec(
 1158 ppc_context *ctxt
 1159);
1160
1161void _CPU_Fatal_error(
1162 uint32_t _error
1163);
1164
1166typedef uintptr_t CPU_Uint32ptr;
1167
1168#endif /* ASM */
1169
1170#ifdef __cplusplus
1171}
1172#endif
1173
1174#endif /* _RTEMS_SCORE_CPU_H */