RTEMS  5.0.0
percpu.h
Go to the documentation of this file.
1 
8 /*
9  * COPYRIGHT (c) 1989-2011.
10  * On-Line Applications Research Corporation (OAR).
11  *
12  * Copyright (c) 2012, 2018 embedded brains GmbH
13  *
14  * The license and distribution terms for this file may be
15  * found in the file LICENSE in this distribution or at
16  * http://www.rtems.org/license/LICENSE.
17  */
18 
19 #ifndef _RTEMS_PERCPU_H
20 #define _RTEMS_PERCPU_H
21 
22 #include <rtems/score/cpuimpl.h>
23 
24 #if defined( ASM )
25  #include <rtems/asm.h>
26 #else
27  #include <rtems/score/assert.h>
28  #include <rtems/score/chain.h>
29  #include <rtems/score/isrlock.h>
30  #include <rtems/score/smp.h>
31  #include <rtems/score/smplock.h>
32  #include <rtems/score/timestamp.h>
33  #include <rtems/score/watchdog.h>
34 #endif
35 
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39 
40 #if defined(RTEMS_SMP)
 /*
  * Rough upper bound for sizeof( Per_CPU_Control ).  It is only used below
  * to round the per-CPU control size up to a power of two; the estimate
  * grows for profiling/debug builds and with the pointer size.
  */
41  #if defined(RTEMS_PROFILING)
42  #define PER_CPU_CONTROL_SIZE_APPROX ( 512 + CPU_INTERRUPT_FRAME_SIZE )
43  #elif defined(RTEMS_DEBUG) || CPU_SIZEOF_POINTER > 4
44  #define PER_CPU_CONTROL_SIZE_APPROX ( 256 + CPU_INTERRUPT_FRAME_SIZE )
45  #else
46  #define PER_CPU_CONTROL_SIZE_APPROX ( 128 + CPU_INTERRUPT_FRAME_SIZE )
47  #endif
48 
49  /*
50  * This ensures that on SMP configurations the individual per-CPU controls
51  * are on different cache lines to prevent false sharing. This define can be
52  * used in assembler code to easily get the per-CPU control for a particular
53  * processor.
54  */
55  #if PER_CPU_CONTROL_SIZE_APPROX > 1024
56  #define PER_CPU_CONTROL_SIZE_LOG2 11
57  #elif PER_CPU_CONTROL_SIZE_APPROX > 512
58  #define PER_CPU_CONTROL_SIZE_LOG2 10
59  #elif PER_CPU_CONTROL_SIZE_APPROX > 256
60  #define PER_CPU_CONTROL_SIZE_LOG2 9
61  #elif PER_CPU_CONTROL_SIZE_APPROX > 128
62  #define PER_CPU_CONTROL_SIZE_LOG2 8
63  #else
64  #define PER_CPU_CONTROL_SIZE_LOG2 7
65  #endif
66 
 /* Power-of-two size of one per-CPU control envelope. */
67  #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
68 #endif
69 
70 #if !defined( ASM )
71 
72 struct Record_Control;
73 
74 struct _Thread_Control;
75 
76 struct Scheduler_Context;
77 
91 #if defined( RTEMS_SMP )
92 
/*
 * State of a processor in an SMP configuration.
 *
 * NOTE(review): the per-state Doxygen comments were elided by this
 * extraction; the short comments below are reconstructed from the state
 * names and should be verified against _Per_CPU_State_change().
 */
126 typedef enum {
 /* Initial state of a per-CPU control before start-up completes. */
135  PER_CPU_STATE_INITIAL,
136 
 /* The processor is ready to start multitasking. */
151  PER_CPU_STATE_READY_TO_START_MULTITASKING,
152 
 /* A request to start multitasking on this processor was issued. */
161  PER_CPU_STATE_REQUEST_START_MULTITASKING,
162 
 /* Normal operation: the processor is up and runs multitasking. */
166  PER_CPU_STATE_UP,
167 
 /* A shutdown was requested -- presumably a terminal state; confirm. */
171  PER_CPU_STATE_SHUTDOWN
172 } Per_CPU_State;
173 
174 #endif /* defined( RTEMS_SMP ) */
175 
/*
 * Per-CPU statistics.  All members exist only in RTEMS_PROFILING builds;
 * otherwise the structure is empty.
 *
 * NOTE(review): member comments below are reconstructed from the member
 * names (the original Doxygen comments were elided by this extraction).
 */
179 typedef struct {
180 #if defined( RTEMS_PROFILING )
181 
 /* Instant (in CPU counter ticks) of the last thread dispatch disable. */
186  CPU_Counter_ticks thread_dispatch_disabled_instant;
187 
 /* Longest observed time with thread dispatching disabled. */
192  CPU_Counter_ticks max_thread_dispatch_disabled_time;
193 
 /* Longest observed interrupt processing time. */
201  CPU_Counter_ticks max_interrupt_time;
202 
 /* Longest observed interrupt delay -- TODO confirm exact definition
    (delivery latency?) against the profiling implementation. */
207  CPU_Counter_ticks max_interrupt_delay;
208 
 /* Count of thread dispatch disable operations. */
215  uint64_t thread_dispatch_disabled_count;
216 
 /* Accumulated time with thread dispatching disabled. */
226  uint64_t total_thread_dispatch_disabled_time;
227 
 /* Count of interrupts processed on this processor. */
234  uint64_t interrupt_count;
235 
 /* Accumulated interrupt processing time. */
244  uint64_t total_interrupt_time;
245 #endif /* defined( RTEMS_PROFILING ) */
246 } Per_CPU_Stats;
247 
251 typedef enum {
260 
269 
278 
284 
290 typedef struct Per_CPU_Control {
291  #if CPU_PER_CPU_CONTROL_SIZE > 0
292 
295  CPU_Per_CPU_control cpu_per_cpu;
296  #endif
297 
302 
307 
312  uint32_t isr_nest_level;
313 
323 
328  volatile uint32_t thread_dispatch_disable_level;
329 
343  volatile bool dispatch_necessary;
344 
345  /*
346  * Ensure that the executing member is at least 4-byte aligned, see
347  * PER_CPU_OFFSET_EXECUTING. This is necessary on CPU ports with relaxed
348  * alignment restrictions, e.g. type alignment is less than the type size.
349  */
350  bool reserved_for_executing_alignment[ 3 ];
351 
363 
379 
380 #if defined(RTEMS_SMP)
381  CPU_Interrupt_frame Interrupt_frame;
382 #endif
383 
395 
399  struct {
403  ISR_LOCK_MEMBER( Lock )
404 
405 
409  uint64_t ticks;
410 
417  } Watchdog;
418 
419  #if defined( RTEMS_SMP )
420 
428  SMP_ticket_lock_Control Lock;
429 
430  #if defined( RTEMS_PROFILING )
431 
434  SMP_lock_Stats Lock_stats;
435 
439  SMP_lock_Stats_context Lock_stats_context;
440  #endif
441 
447  Chain_Control Threads_in_need_for_help;
448 
455  Atomic_Ulong message;
456 
457  struct {
464  const struct _Scheduler_Control *control;
465 
472  const struct Scheduler_Context *context;
473 
478  struct _Thread_Control *idle_if_online_and_unused;
479  } Scheduler;
480 
486  char *data;
487 
495  Per_CPU_State state;
496 
503  Atomic_Uintptr before_multitasking_action;
504 
509  bool online;
510 
515  bool boot;
516  #endif
517 
518  struct Record_Control *record;
519 
520  Per_CPU_Stats Stats;
522 
523 #if defined( RTEMS_SMP )
524 typedef struct {
525  Per_CPU_Control per_cpu;
526  char unused_space_for_cache_line_alignment
527  [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
529 #else
530 typedef struct {
531  Per_CPU_Control per_cpu;
532 } Per_CPU_Control_envelope;
533 #endif
534 
540 extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
541 
/*
 * _Per_CPU_Acquire( cpu ): acquires the per-CPU lock of @a cpu.
 *
 * SMP: takes the processor's SMP ticket lock, passing the lock statistics
 * members (used when RTEMS_PROFILING is enabled).  Non-SMP: a no-op that
 * merely evaluates @a cpu so call sites stay warning-free.
 */
542 #if defined( RTEMS_SMP )
543 #define _Per_CPU_Acquire( cpu ) \
544  _SMP_ticket_lock_Acquire( \
545  &( cpu )->Lock, \
546  &( cpu )->Lock_stats, \
547  &( cpu )->Lock_stats_context \
548  )
549 #else
550 #define _Per_CPU_Acquire( cpu ) \
551  do { \
552  (void) ( cpu ); \
553  } while ( 0 )
554 #endif
555 
/*
 * _Per_CPU_Release( cpu ): releases the per-CPU lock of @a cpu, the
 * counterpart of _Per_CPU_Acquire().  Non-SMP: a no-op that only
 * evaluates @a cpu.
 */
556 #if defined( RTEMS_SMP )
557 #define _Per_CPU_Release( cpu ) \
558  _SMP_ticket_lock_Release( \
559  &( cpu )->Lock, \
560  &( cpu )->Lock_stats_context \
561  )
562 #else
563 #define _Per_CPU_Release( cpu ) \
564  do { \
565  (void) ( cpu ); \
566  } while ( 0 )
567 #endif
568 
/*
 * Disables local interrupts (saving the previous state in @a isr_cookie)
 * and then acquires the per-CPU lock of @a cpu.  Use
 * _Per_CPU_Release_and_ISR_enable() with the same cookie to undo both.
 * Non-SMP: only the interrupt disable remains.
 */
569 #if defined( RTEMS_SMP )
570 #define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
571  do { \
572  _ISR_Local_disable( isr_cookie ); \
573  _Per_CPU_Acquire( cpu ); \
574  } while ( 0 )
575 #else
576 #define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
577  do { \
578  _ISR_Local_disable( isr_cookie ); \
579  (void) ( cpu ); \
580  } while ( 0 )
581 #endif
582 
/*
 * Releases the per-CPU lock of @a cpu and then restores the interrupt
 * state saved in @a isr_cookie -- the exact inverse (and reverse order)
 * of _Per_CPU_ISR_disable_and_acquire().  Non-SMP: only restores the
 * interrupt state.
 */
583 #if defined( RTEMS_SMP )
584 #define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
585  do { \
586  _Per_CPU_Release( cpu ); \
587  _ISR_Local_enable( isr_cookie ); \
588  } while ( 0 )
589 #else
590 #define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
591  do { \
592  (void) ( cpu ); \
593  _ISR_Local_enable( isr_cookie ); \
594  } while ( 0 )
595 #endif
596 
/*
 * Disables local interrupts and acquires the per-CPU lock of every
 * processor, in ascending processor-index order (a single, consistent
 * acquisition order).  Non-SMP: reduces to a plain interrupt disable.
 */
597 #if defined( RTEMS_SMP )
598 #define _Per_CPU_Acquire_all( isr_cookie ) \
599  do { \
600  uint32_t ncpus = _SMP_Get_processor_count(); \
601  uint32_t cpu; \
602  _ISR_Local_disable( isr_cookie ); \
603  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
604  _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
605  } \
606  } while ( 0 )
607 #else
608 #define _Per_CPU_Acquire_all( isr_cookie ) \
609  _ISR_Local_disable( isr_cookie )
610 #endif
611 
/*
 * Releases the per-CPU lock of every processor (ascending index order)
 * and then restores the interrupt state saved by _Per_CPU_Acquire_all().
 * Non-SMP: reduces to a plain interrupt enable.
 */
612 #if defined( RTEMS_SMP )
613 #define _Per_CPU_Release_all( isr_cookie ) \
614  do { \
615  uint32_t ncpus = _SMP_Get_processor_count(); \
616  uint32_t cpu; \
617  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
618  _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
619  } \
620  _ISR_Local_enable( isr_cookie ); \
621  } while ( 0 )
622 #else
623 #define _Per_CPU_Release_all( isr_cookie ) \
624  _ISR_Local_enable( isr_cookie )
625 #endif
626 
627 /*
628  * If we get the current processor index in a context which allows thread
629  * dispatching, then we may already run on another processor right after the
630  * read instruction. There are very few cases in which this makes sense (here
631  * we can use _Per_CPU_Get_snapshot()). All other places must use
632  * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
633  */
634 #if defined( _CPU_Get_current_per_CPU_control )
 /* The CPU port provides its own way to obtain the per-CPU control. */
635  #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
636 #else
 /* Generic fallback: index the per-CPU table by the current processor. */
637  #define _Per_CPU_Get_snapshot() \
638  ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
639 #endif
640 
/*
 * Returns the per-CPU control of the processor executing this code.  On
 * SMP, the assertion (active in RTEMS_DEBUG builds) checks that thread
 * dispatching is disabled or that an ISR is in progress -- contexts in
 * which the caller cannot migrate, so the snapshot stays valid; see the
 * comment above _Per_CPU_Get_snapshot().
 */
641 #if defined( RTEMS_SMP )
642 static inline Per_CPU_Control *_Per_CPU_Get( void )
643 {
644  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
645 
646  _Assert(
647  cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
648  );
649 
650  return cpu_self;
651 }
652 #else
 /* Uniprocessor: the snapshot is always the one and only processor. */
653 #define _Per_CPU_Get() _Per_CPU_Get_snapshot()
654 #endif
655 
656 static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
657 {
658  return &_Per_CPU_Information[ index ].per_cpu;
659 }
660 
661 static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
662 {
663  const Per_CPU_Control_envelope *per_cpu_envelope =
664  ( const Per_CPU_Control_envelope * ) cpu;
665 
666  return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
667 }
668 
669 static inline struct _Thread_Control *_Per_CPU_Get_executing(
670  const Per_CPU_Control *cpu
671 )
672 {
673  return cpu->executing;
674 }
675 
676 static inline bool _Per_CPU_Is_processor_online(
677  const Per_CPU_Control *cpu
678 )
679 {
680 #if defined( RTEMS_SMP )
681  return cpu->online;
682 #else
683  (void) cpu;
684 
685  return true;
686 #endif
687 }
688 
689 static inline bool _Per_CPU_Is_boot_processor(
690  const Per_CPU_Control *cpu
691 )
692 {
693 #if defined( RTEMS_SMP )
694  return cpu->boot;
695 #else
696  (void) cpu;
697 
698  return true;
699 #endif
700 }
701 
702 #if defined( RTEMS_SMP )
703 
/**
 * @brief Performs the initial per-CPU processing (SMP only).
 *
 * NOTE(review): the detailed contract was elided by this extraction --
 * consult the implementation before relying on specifics.
 */
709 void _Per_CPU_Initialize(void);
710 
/**
 * @brief Changes the state of the processor @a cpu to @a new_state.
 */
711 void _Per_CPU_State_change(
712  Per_CPU_Control *cpu,
713  Per_CPU_State new_state
714 );
715 
/**
 * @brief Waits until the processor @a cpu_index leaves the
 *   PER_CPU_STATE_INITIAL state, for at most @a timeout_in_ns nanoseconds.
 *
 * @return Presumably true on success and false on timeout -- verify
 *   against the implementation.
 */
741 bool _Per_CPU_State_wait_for_non_initial_state(
742  uint32_t cpu_index,
743  uint32_t timeout_in_ns
744 );
745 
746 #endif /* defined( RTEMS_SMP ) */
747 
748 /*
749  * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
750  * Thus when built for non-SMP, there should be no performance penalty.
751  */
/*
 * Shorthand accessors for members of the current processor's per-CPU
 * control.  Each expansion goes through _Per_CPU_Get() and therefore
 * inherits its RTEMS_DEBUG context checks.
 */
752 #define _Thread_Dispatch_disable_level \
753  _Per_CPU_Get()->thread_dispatch_disable_level
754 #define _Thread_Heir \
755  _Per_CPU_Get()->heir
756 
/* A CPU port may provide its own way to obtain the executing thread. */
757 #if defined(_CPU_Get_thread_executing)
758 #define _Thread_Executing \
759  _CPU_Get_thread_executing()
760 #else
761 #define _Thread_Executing \
762  _Per_CPU_Get_executing( _Per_CPU_Get() )
763 #endif
764 
765 #define _ISR_Nest_level \
766  _Per_CPU_Get()->isr_nest_level
767 #define _CPU_Interrupt_stack_low \
768  _Per_CPU_Get()->interrupt_stack_low
769 #define _CPU_Interrupt_stack_high \
770  _Per_CPU_Get()->interrupt_stack_high
771 #define _Thread_Dispatch_necessary \
772  _Per_CPU_Get()->dispatch_necessary
773 
/*
 * Body of _Thread_Get_executing(): returns the thread control block of
 * the executing thread.  NOTE(review): the signature line was lost in
 * this extraction; per the index at the end of this listing it is
 * "RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing(void)".
 */
785 {
786  struct _Thread_Control *executing;
787 
 /* On SMP without a port-provided _CPU_Get_thread_executing(), interrupts
    are disabled around the read -- presumably so the processor cannot
    change between fetching the per-CPU control and its executing member
    (see the migration note above _Per_CPU_Get_snapshot()). */
788  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
789  ISR_Level level;
790 
791  _ISR_Local_disable( level );
792  #endif
793 
794  executing = _Thread_Executing;
795 
796  #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
797  _ISR_Local_enable( level );
798  #endif
799 
800  return executing;
801 }
802 
805 #endif /* !defined( ASM ) */
806 
807 #if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )
808 
/*
 * Byte offsets of selected Per_CPU_Control members, for use from assembly
 * code.  NOTE(review): these hand-computed offsets encode the member
 * layout of Per_CPU_Control (CPU_SIZEOF_POINTER-byte pointers and 4-byte
 * fields) and must be kept in sync with the structure definition above.
 */
809 #define PER_CPU_INTERRUPT_STACK_LOW \
810  CPU_PER_CPU_CONTROL_SIZE
811 #define PER_CPU_INTERRUPT_STACK_HIGH \
812  PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
813 
/* Addresses of the interrupt stack bounds within the first envelope. */
814 #define INTERRUPT_STACK_LOW \
815  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
816 #define INTERRUPT_STACK_HIGH \
817  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
818 
819 /*
820  * These are the offsets of the required elements in the per CPU table.
821  */
822 #define PER_CPU_ISR_NEST_LEVEL \
823  PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
824 #define PER_CPU_ISR_DISPATCH_DISABLE \
825  PER_CPU_ISR_NEST_LEVEL + 4
826 #define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
827  PER_CPU_ISR_DISPATCH_DISABLE + 4
828 #define PER_CPU_DISPATCH_NEEDED \
829  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
830 #define PER_CPU_OFFSET_EXECUTING \
831  PER_CPU_DISPATCH_NEEDED + 4
832 #define PER_CPU_OFFSET_HEIR \
833  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
834 #if defined(RTEMS_SMP)
835 #define PER_CPU_INTERRUPT_FRAME_AREA \
836  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
837 #endif
838 
/* Absolute addresses (first envelope) of frequently used members. */
839 #define THREAD_DISPATCH_DISABLE_LEVEL \
840  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
841 #define ISR_NEST_LEVEL \
842  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
843 #define DISPATCH_NEEDED \
844  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)
845 
846 #endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
847 
848 #ifdef __cplusplus
849 }
850 #endif
851 
852 #endif
853 /* end of include file */
Per-CPU statistics.
Definition: percpu.h:179
void * interrupt_stack_high
The interrupt stack high address for this processor.
Definition: percpu.h:306
SuperCore SMP Support API.
Index for realtime clock per-CPU watchdog header.
Definition: percpu.h:268
Definition: media-server.c:33
int64_t Timestamp_Control
Definition: timestamp.h:52
Count of per-CPU watchdog headers.
Definition: percpu.h:282
Scheduler context.
Definition: scheduler.h:249
struct _Thread_Control * executing
This is the thread executing on this processor.
Definition: percpu.h:362
Definition: record.h:45
#define RTEMS_INLINE_ROUTINE
Definition: basedefs.h:65
#define ISR_LOCK_MEMBER(_designator)
Defines an ISR lock member.
Definition: isrlock.h:89
Interrupt stack frame (ISF).
Definition: cpu.h:306
Definition: chain.h:83
Helpers for Manipulating Timestamps.
Thread_Scheduler_control Scheduler
Scheduler related control.
Definition: thread.h:767
#define _ISR_Get_level()
Return current interrupt level.
Definition: isrlevel.h:125
Per_CPU_Control_envelope _Per_CPU_Information [] CPU_STRUCTURE_ALIGNMENT
Set of Per CPU Core Information.
Timestamp_Control cpu_usage_timestamp
The CPU usage timestamp contains the time point of the last heir thread change or last CPU usage upda...
Definition: percpu.h:394
Definition: percpu.h:530
Per_CPU_Watchdog_index
Per-CPU watchdog header index.
Definition: percpu.h:251
The CPU specific per-CPU control.
Definition: cpuimpl.h:54
#define _ISR_Local_disable(_level)
Disables interrupts on this processor.
Definition: isrlevel.h:54
uint32_t ISR_Level
Definition: isrlevel.h:38
Definition: thread.h:728
Per CPU Core Structure.
Definition: percpu.h:290
uint32_t isr_nest_level
Definition: percpu.h:312
void * interrupt_stack_low
The interrupt stack low address for this processor.
Definition: percpu.h:301
#define _ISR_Local_enable(_level)
Enables interrupts on this processor.
Definition: isrlevel.h:71
Constants and Structures Associated with Watchdog Timers.
uint32_t isr_dispatch_disable
Indicates whether an ISR thread dispatch is disabled.
Definition: percpu.h:322
Definition: intercom.c:74
volatile bool dispatch_necessary
This is set to true when this processor needs to run the thread dispatcher.
Definition: percpu.h:343
struct _Thread_Control * heir
This is the heir thread for this processor.
Definition: percpu.h:378
struct Per_CPU_Control Per_CPU_Control
Per CPU Core Structure.
The watchdog header to manage scheduled watchdogs.
Definition: watchdog.h:68
unsigned context
Definition: tlb.h:108
SMP Lock API.
Chain Handler API.
RTEMS_INLINE_ROUTINE struct _Thread_Control * _Thread_Get_executing(void)
Returns the thread control block of the executing thread.
Definition: percpu.h:784
Scheduler control.
Definition: scheduler.h:266
volatile uint32_t thread_dispatch_disable_level
The thread dispatch critical section nesting counter which is used to prevent context switches at ino...
Definition: percpu.h:328
Index for monotonic clock per-CPU watchdog header.
Definition: percpu.h:277
Index for tick clock per-CPU watchdog header.
Definition: percpu.h:259
ISR Locks.