RTEMS CPU Kit with SuperCore  4.11.2
schedulerimpl.h
Go to the documentation of this file.
1 
10 /*
11  * Copyright (C) 2010 Gedare Bloom.
12  * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
13  * Copyright (c) 2014-2015 embedded brains GmbH
14  *
15  * The license and distribution terms for this file may be
16  * found in the file LICENSE in this distribution or at
17  * http://www.rtems.org/license/LICENSE.
18  */
19 
20 #ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
21 #define _RTEMS_SCORE_SCHEDULERIMPL_H
22 
23 #include <rtems/score/scheduler.h>
24 #include <rtems/score/cpusetimpl.h>
25 #include <rtems/score/smpimpl.h>
26 #include <rtems/score/threadimpl.h>
27 
28 #ifdef __cplusplus
29 extern "C" {
30 #endif
31 
45 
46 RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
47  const Scheduler_Control *scheduler
48 )
49 {
50  return scheduler->context;
51 }
52 
/**
 * @brief Returns the scheduler instance which currently controls the thread.
 *
 * @param[in] the_thread The thread.
 *
 * @return The scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  /* Uni-processor configurations have exactly one scheduler instance. */
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Returns the thread's own scheduler instance.
 *
 * On SMP this may differ from _Scheduler_Get() while the thread temporarily
 * uses another instance's node (see the helping protocol below, which
 * reassigns Scheduler.control but not Scheduler.own_control).
 *
 * @param[in] the_thread The thread.
 *
 * @return The own scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
78 
/**
 * @brief Returns the scheduler instance assigned to the processor index.
 *
 * @param[in] cpu_index The processor index.
 *
 * @return The scheduler instance of the processor (may be NULL on SMP if no
 *   instance is assigned; always the single instance otherwise).
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Returns the scheduler instance assigned to the processor.
 *
 * @param[in] cpu The per-processor control.
 *
 * @return The scheduler instance of the processor.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
  uint32_t cpu_index = _Per_CPU_Get_index( cpu );

  return _Scheduler_Get_by_CPU_index( cpu_index );
}
100 
/**
 * @brief Returns the thread's own scheduler node, i.e. the node belonging to
 * its own scheduler instance regardless of any helping arrangement.
 *
 * @param[in] the_thread The thread.
 *
 * @return The own scheduler node.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_node;
#else
  /* Without SMP a thread has exactly one node. */
  return the_thread->Scheduler.node;
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Returns the thread currently using the scheduler node.
 *
 * This may be the owner, a helped thread, or an idle placeholder.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
  const Scheduler_Node *node
)
{
  return node->user;
}
#endif
120 
/*
 * Passing the Scheduler_Control* to these functions allows multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */
135 
145 {
146  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
147 
148  ( *scheduler->Operations.schedule )( scheduler, the_thread );
149 }
150 
#if defined(RTEMS_SMP)
/**
 * @brief Iteration context for asking resource owners for help.
 */
typedef struct {
  /* The thread currently in need of help. */
  Thread_Control *needs_help;

  /* The next thread to ask for help; set by the visitor when the scheduler
   * instance made progress, NULL otherwise. */
  Thread_Control *next_needs_help;
} Scheduler_Ask_for_help_context;

/**
 * @brief Visitor which asks one resource node's thread to offer help.
 *
 * @retval true The ask-for-help operation made progress; stop the iteration.
 * @retval false Continue the iteration.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
  Resource_Node *resource_node,
  void          *arg
)
{
  bool done;
  Scheduler_Ask_for_help_context *help_context = arg;
  Thread_Control *previous_needs_help = help_context->needs_help;
  Thread_Control *next_needs_help;
  Thread_Control *offers_help =
    THREAD_RESOURCE_NODE_TO_THREAD( resource_node );
  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );

  next_needs_help = ( *scheduler->Operations.ask_for_help )(
    scheduler,
    offers_help,
    previous_needs_help
  );

  /* Progress was made exactly when the thread in need of help changed. */
  done = next_needs_help != previous_needs_help;

  if ( done ) {
    help_context->next_needs_help = next_needs_help;
  }

  return done;
}

/**
 * @brief Asks threads depending on resources owned by the thread for help.
 *
 * Repeats until no thread is in need of help anymore: first the own
 * scheduler instance is asked, then the resource tree of the thread is
 * iterated so that threads owning related resources may offer help.
 *
 * @param[in] needs_help The thread in need of help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    needs_help = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      needs_help,
      needs_help
    );

    if ( needs_help != NULL ) {
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      /* Walk the resource tree and ask each related thread for help. */
      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}

/**
 * @brief Asks for help if the thread owns resources and does not already use
 * a helping node as an active rival.
 *
 * @param[in] needs_help The thread possibly in need of help, may be NULL.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help( needs_help );
    }
  }
}
#endif
244 
254 {
255  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
256 #if defined(RTEMS_SMP)
257  Thread_Control *needs_help;
258 
259  needs_help =
260 #endif
261  ( *scheduler->Operations.yield )( scheduler, the_thread );
262 
263 #if defined(RTEMS_SMP)
264  _Scheduler_Ask_for_help_if_necessary( needs_help );
265 #endif
266 }
267 
279 {
280  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
281 
282  ( *scheduler->Operations.block )( scheduler, the_thread );
283 }
284 
296 {
297  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
298 #if defined(RTEMS_SMP)
299  Thread_Control *needs_help;
300 
301  needs_help =
302 #endif
303  ( *scheduler->Operations.unblock )( scheduler, the_thread );
304 
305 #if defined(RTEMS_SMP)
306  _Scheduler_Ask_for_help_if_necessary( needs_help );
307 #endif
308 }
309 
327  Thread_Control *the_thread,
328  Priority_Control new_priority,
329  bool prepend_it
330 )
331 {
332  const Scheduler_Control *scheduler = _Scheduler_Get_own( the_thread );
333 #if defined(RTEMS_SMP)
334  Thread_Control *needs_help;
335 
336  needs_help =
337 #endif
338  ( *scheduler->Operations.change_priority )(
339  scheduler,
340  the_thread,
341  new_priority,
342  prepend_it
343  );
344 
345 #if defined(RTEMS_SMP)
346  _Scheduler_Ask_for_help_if_necessary( needs_help );
347 #endif
348 }
349 
362  const Scheduler_Control *scheduler,
363  Thread_Control *the_thread
364 )
365 {
366  return ( *scheduler->Operations.node_initialize )( scheduler, the_thread );
367 }
368 
379  const Scheduler_Control *scheduler,
380  Thread_Control *the_thread
381 )
382 {
383  ( *scheduler->Operations.node_destroy )( scheduler, the_thread );
384 }
385 
393  Thread_Control *the_thread,
394  Priority_Control new_priority
395 )
396 {
397  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
398 
399  ( *scheduler->Operations.update_priority )(
400  scheduler,
401  the_thread,
402  new_priority
403  );
404 }
405 
423  const Scheduler_Control *scheduler,
424  Priority_Control p1,
426 )
427 {
428  return ( *scheduler->Operations.priority_compare )( p1, p2 );
429 }
430 
438  Thread_Control *the_thread,
439  uint32_t length
440 )
441 {
442  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
443 
444  ( *scheduler->Operations.release_job )( scheduler, the_thread, length );
445 }
446 
456 {
457  uint32_t cpu_count = _SMP_Get_processor_count();
458  uint32_t cpu_index;
459 
460  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
461  const Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
462  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
463  Thread_Control *executing = cpu->executing;
464 
465  if ( scheduler != NULL && executing != NULL ) {
466  ( *scheduler->Operations.tick )( scheduler, executing );
467  }
468  }
469 }
470 
481  const Scheduler_Control *scheduler,
482  Thread_Control *the_thread,
483  Per_CPU_Control *cpu
484 )
485 {
486  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
487 }
488 
#if defined(RTEMS_SMP)
/**
 * @brief Returns the processor-to-scheduler assignment for the processor
 * index.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

/**
 * @brief Returns true if the processor assignment is mandatory.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

/**
 * @brief Returns true if the processor should be started, i.e. a scheduler
 * instance is assigned to it.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */
511 
/**
 * @brief Returns true if the scheduler instance owns the processor with the
 * specified index.
 *
 * On non-SMP configurations this is trivially true.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}
529 
/**
 * @brief Sets a new scheduler instance for the thread.
 *
 * On SMP the thread migrates between instances: its old node is destroyed
 * and a new one is initialized while the thread is hidden in the
 * STATES_MIGRATING state.  On non-SMP this is a no-op.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Control *current_scheduler = _Scheduler_Get( the_thread );

  if ( current_scheduler != scheduler ) {
    /* Keep the thread out of both instances during the switch-over. */
    _Thread_Set_state( the_thread, STATES_MIGRATING );
    _Scheduler_Node_destroy( current_scheduler, the_thread );
    the_thread->Scheduler.own_control = scheduler;
    the_thread->Scheduler.control = scheduler;
    _Scheduler_Node_initialize( scheduler, the_thread );
    /* Re-apply the current priority so the new node is placed correctly. */
    _Scheduler_Update_priority( the_thread, the_thread->current_priority );
    _Thread_Clear_state( the_thread, STATES_MIGRATING );
  }
#else
  (void) scheduler;
#endif
}
551 
552 #if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
553 
/**
 * @brief Fills the processor set with the processors owned by the scheduler
 * instance.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpusetsize Size of the processor set in bytes.
 * @param[out] cpuset The processor set; cleared first, then one bit per
 *   owned processor is set.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    /* The single instance owns every processor. */
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
577 
/**
 * @brief Default get-affinity operation: the affinity of any thread is the
 * processor set owned by its scheduler instance.
 *
 * @retval true Always succeeds.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  /* The particular thread is irrelevant for the default affinity. */
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}
591 
592 bool _Scheduler_Get_affinity(
593  Thread_Control *the_thread,
594  size_t cpusetsize,
595  cpu_set_t *cpuset
596 );
597 
598 RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
599  const Scheduler_Control *scheduler,
600  Thread_Control *the_thread,
601  size_t cpusetsize,
602  const cpu_set_t *cpuset
603 )
604 {
605  uint32_t cpu_count = _SMP_Get_processor_count();
606  uint32_t cpu_index;
607  bool ok = true;
608 
609  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
610 #if defined(RTEMS_SMP)
611  const Scheduler_Control *scheduler_of_cpu =
612  _Scheduler_Get_by_CPU_index( cpu_index );
613 
614  ok = ok
615  && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
616  || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
617  && scheduler != scheduler_of_cpu ) );
618 #else
619  (void) scheduler;
620 
621  ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
622 #endif
623  }
624 
625  return ok;
626 }
627 
628 bool _Scheduler_Set_affinity(
629  Thread_Control *the_thread,
630  size_t cpusetsize,
631  const cpu_set_t *cpuset
632 );
633 
634 #endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
635 
636 RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
637  Thread_Control *new_heir,
638  bool force_dispatch
639 )
640 {
641  Thread_Control *heir = _Thread_Heir;
642 
643  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
644  _Thread_Heir = new_heir;
645  _Thread_Dispatch_necessary = true;
646  }
647 }
648 
/**
 * @brief Generic block operation: extract the thread, then re-run the
 * scheduling decision if the blocked thread was executing or the heir.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread to block.
 * @param[in] extract Operation removing the thread from the ready set.
 * @param[in] schedule Operation selecting a new heir (called with
 *   force_dispatch true).
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  void ( *extract )(
    const Scheduler_Control *,
    Thread_Control * ),
  void ( *schedule )(
    const Scheduler_Control *,
    Thread_Control *,
    bool )
)
{
  ( *extract )( scheduler, the_thread );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
669 
675  const Scheduler_Control *scheduler,
676  Priority_Control p1,
678 )
679 {
680  return _Scheduler_Priority_compare( scheduler, p1, p2 ) < 0;
681 }
682 
688  const Scheduler_Control *scheduler,
689  Priority_Control p1,
691 )
692 {
693  return _Scheduler_Priority_compare( scheduler, p1, p2 ) > 0;
694 }
695 
/**
 * @brief Returns the number of processors owned by the scheduler instance
 * (always one on non-SMP configurations).
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}
708 
709 RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
710 {
711  return _Objects_Build_id(
712  OBJECTS_FAKE_OBJECTS_API,
713  OBJECTS_FAKE_OBJECTS_SCHEDULERS,
715  scheduler_index + 1
716  );
717 }
718 
719 RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
720 {
721  uint32_t minimum_id = _Scheduler_Build_id( 0 );
722 
723  return id - minimum_id;
724 }
725 
726 RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
727  Objects_Id id,
728  const Scheduler_Control **scheduler_p
729 )
730 {
731  uint32_t index = _Scheduler_Get_index_by_id( id );
732  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];
733 
734  *scheduler_p = scheduler;
735 
736  return index < _Scheduler_Count
737  && _Scheduler_Get_processor_count( scheduler ) > 0;
738 }
739 
740 RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
741 {
742  const Scheduler_Control *scheduler;
743  bool ok = _Scheduler_Get_by_id( id, &scheduler );
744 
745  (void) scheduler;
746 
747  return ok;
748 }
749 
/**
 * @brief Returns the zero-based index of the scheduler instance in the
 * scheduler table.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

/**
 * @brief Returns the scheduler node currently used by the thread (may be a
 * helping node on SMP).
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
  return the_thread->Scheduler.node;
}
763 
/**
 * @brief Initializes the common (scheduler-independent) part of a scheduler
 * node.
 *
 * On SMP the owner and user are both set to the thread, no idle thread is
 * claimed, and no help protocol is active yet.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
  Scheduler_Node *node,
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  node->user = the_thread;
  node->help_state = SCHEDULER_HELP_YOURSELF;
  node->owner = the_thread;
  node->idle = NULL;
  node->accepts_help = the_thread;
#else
  /* Non-SMP nodes have no common fields to set up. */
  (void) node;
  (void) the_thread;
#endif
}
780 
781 #if defined(RTEMS_SMP)
782 
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @return The idle thread.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread back to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control *idle
);
804 
/** @brief Returns the owner thread of the scheduler node. */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
  const Scheduler_Node *node
)
{
  return node->owner;
}

/** @brief Returns the idle thread claimed by the node, or NULL. */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
  const Scheduler_Node *node
)
{
  return node->idle;
}

/** @brief Sets the thread currently using the scheduler node. */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
  Scheduler_Node *node,
  Thread_Control *user
)
{
  node->user = user;
}

/** @brief Sets the scheduler node currently used by the thread. */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}

/**
 * @brief Assigns the thread the scheduler instance of the previous user of
 * the node together with the node itself.
 *
 * Used by the helping protocol when a thread takes over another thread's
 * node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}
847 
848 extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
849 
/**
 * @brief Changes the thread's scheduler state, asserting that the transition
 * is listed as valid in _Scheduler_Thread_state_valid_state_changes.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );

  the_thread->Scheduler.state = new_state;
}
862 
871 RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
872  Thread_Control *the_thread,
873  Scheduler_Help_state new_help_state
874 )
875 {
876  Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread );
877  Scheduler_Help_state previous_help_state = node->help_state;
878 
879  node->help_state = new_help_state;
880 
881  return previous_help_state;
882 }
883 
900 void _Scheduler_Thread_change_resource_root(
901  Thread_Control *top,
902  Thread_Control *root
903 );
904 
/**
 * @brief Uses the idle thread as a placeholder user of the node.
 *
 * Valid only for active-owner/active-rival nodes whose user is currently
 * the owner and which hold no idle thread yet.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}
924 
937 RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
938  Scheduler_Context *context,
939  Scheduler_Node *node,
940  Scheduler_Get_idle_thread get_idle_thread
941 )
942 {
943  Thread_Control *idle = ( *get_idle_thread )( context );
944 
945  _Scheduler_Set_idle_thread( node, idle );
946 
947  return idle;
948 }
949 
/**
 * @brief Actions a scheduler may take after _Scheduler_Try_to_schedule_node().
 */
typedef enum {
  /* Schedule the node as is. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  /* Exchange an idle thread between nodes before scheduling. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  /* Do not schedule the node; block it instead. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;
955 
/**
 * @brief Decides how a node selected for scheduling is to be handled,
 * depending on its help state and its user/owner thread states.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The candidate node.
 * @param[in] idle An idle thread available for an exchange, may be NULL.
 * @param[in] get_idle_thread Function to obtain an idle thread.
 *
 * @return The action the caller must perform (schedule, idle exchange, or
 *   block).
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Scheduler_Try_to_schedule_action action;
  Thread_Control *owner;
  Thread_Control *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;

  /* Without a help protocol the node is simply scheduled. */
  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );
  user = _Scheduler_Node_get_user( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      /* The rival user takes over node and instance. */
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      /* Nobody can use the node directly: fill it with an idle thread. */
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      /* Fall back to the owner as the node user. */
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      /* A passive node with no ready user cannot be scheduled. */
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  return action;
}
1020 
/**
 * @brief Releases the idle thread claimed by the node, if any, and restores
 * the owner as the node user.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node.
 * @param[in] release_idle_thread Function returning the idle thread to the
 *   instance.
 *
 * @return The released idle thread, or NULL if the node held none.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    /* Detach the idle thread from this node and hand it back. */
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}
1052 
/**
 * @brief Moves the idle thread from one node to another.
 *
 * The node giving up the idle thread gets its owner back as user; the node
 * in need receives the idle thread as placeholder user.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}
1066 
/**
 * @brief Blocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked.
 * @param[in] node The node of the thread.
 * @param[in] is_scheduled True if the node is currently scheduled.
 * @param[in] get_idle_thread Function to obtain an idle thread.
 *
 * @retval true The caller should block the thread.
 * @retval false The node stays scheduled with a substitute user (owner or
 *   idle thread placeholder).
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  /* NOTE(review): old_user is assigned on every path that also sets
   * new_user, so the use below is safe despite no initializer. */
  Thread_Control *old_user;
  Thread_Control *new_user;

  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    /* No help protocol: the thread really blocks. */
    _Assert( thread == _Scheduler_Node_get_user( node ) );

    return true;
  }

  new_user = NULL;

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( is_scheduled ) {
      /* Keep the node scheduled via an idle thread placeholder. */
      _Assert( thread == _Scheduler_Node_get_user( node ) );
      old_user = thread;
      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( is_scheduled ) {
      old_user = _Scheduler_Node_get_user( node );

      if ( thread == old_user ) {
        Thread_Control *owner = _Scheduler_Node_get_owner( node );

        if (
          thread != owner
            && owner->Scheduler.state == THREAD_SCHEDULER_READY
        ) {
          /* Hand the node back to its ready owner. */
          new_user = owner;
          _Scheduler_Node_set_user( node, new_user );
        } else {
          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
        }
      }
    }
  } else {
    /* Not implemented, this is part of the OMIP support path. */
    _Assert(0);
  }

  if ( new_user != NULL ) {
    /* The substitute inherits the processor of the previous user. */
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
  }

  return false;
}
1141 
/**
 * @brief Unblocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node of the thread.
 * @param[in] is_scheduled True if the node is currently scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true The caller should unblock the thread via the ready queue.
 * @retval false The thread was placed directly on a scheduled node.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  if ( is_scheduled ) {
    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    /* Free any idle placeholder first; the owner becomes the user again. */
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );
    Thread_Control *owner = _Scheduler_Node_get_owner( node );
    Thread_Control *new_user;

    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
      /* A scheduled active-owner node with a blocked owner held an idle. */
      _Assert( idle != NULL );
      new_user = the_thread;
    } else if ( idle != NULL ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      new_user = the_thread;
    } else if ( the_thread != owner ) {
      /* The rival displaces the currently ready owner. */
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
      new_user = the_thread;
      _Scheduler_Node_set_user( node, new_user );
    } else {
      /* The owner unblocks while a rival uses the node: stay ready. */
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
      new_user = NULL;
    }

    if ( new_user != NULL ) {
      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Set_CPU( new_user, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    }

    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

    unblock = true;
  }

  return unblock;
}
1209 
/**
 * @brief Asks a ready scheduler node for help: the thread in need simply
 * becomes the node user.
 *
 * @param[in] node The ready node offering help.
 * @param[in] needs_help The thread in need of help.
 *
 * @return The thread in need of help (the search continues with it).
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
  Scheduler_Node *node,
  Thread_Control *needs_help
)
{
  _Scheduler_Node_set_user( node, needs_help );

  return needs_help;
}
1227 
/**
 * @brief Asks a currently scheduled node for help.
 *
 * Possibly exchanges the node user: the previous help acceptor, the thread
 * in need of help, or the thread offering help may end up using the node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread in need of help.
 * @param[in] previous_accepts_help The thread previously accepting help via
 *   this node.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @return The next thread in need of help, or NULL.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Thread_Control                *offers_help,
  Thread_Control                *needs_help,
  Thread_Control                *previous_accepts_help,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *next_needs_help = NULL;
  Thread_Control *old_user = NULL;
  Thread_Control *new_user = NULL;

  if (
    previous_accepts_help != needs_help
      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
  ) {
    /* The previous help acceptor still uses this node (or an idle does). */
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
      old_user = previous_accepts_help;
    }

    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
      new_user = needs_help;
    } else {
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

      new_user = offers_help;
    }

    /* The displaced acceptor becomes the next thread in need of help. */
    if ( previous_accepts_help != offers_help ) {
      next_needs_help = previous_accepts_help;
    }
  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      old_user = _Scheduler_Node_get_user( node );
    }

    new_user = needs_help;
  } else {
    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
  }

  if ( new_user != old_user ) {
    /* Move the node's processor from the old user to the new user. */
    Per_CPU_Control *cpu_self = _Per_CPU_Get();
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
    /* Send the old user back to its own node and instance. */
    _Scheduler_Thread_set_scheduler_and_node(
      old_user,
      _Scheduler_Thread_get_own_node( old_user ),
      old_user
    );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );

    _Scheduler_Node_set_user( node, new_user );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
  }

  return next_needs_help;
}
1327 
1339 RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
1340  Scheduler_Context *context,
1341  Scheduler_Node *node,
1342  Thread_Control *offers_help,
1343  Thread_Control *needs_help
1344 )
1345 {
1346  bool enqueue;
1347 
1348  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
1349 
1350  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1351  _Scheduler_Node_set_user( node, needs_help );
1352  _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );
1353 
1354  enqueue = true;
1355  } else {
1356  enqueue = false;
1357  }
1358 
1359  return enqueue;
1360 }
1361 #endif
1362 
1363 ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
1364 
1365 
1372  Thread_Control *the_thread,
1373  ISR_lock_Context *lock_context
1374 )
1375 {
1376  (void) the_thread;
1377  _ISR_lock_ISR_disable_and_acquire( &_Scheduler_Lock, lock_context );
1378 }
1379 
1387  Thread_Control *the_thread,
1388  ISR_lock_Context *lock_context
1389 )
1390 {
1391  (void) the_thread;
1392  _ISR_lock_Release_and_ISR_enable( &_Scheduler_Lock, lock_context );
1393 }
1394 
1397 #ifdef __cplusplus
1398 }
1399 #endif
1400 
1401 #endif
1402 /* end of include file */
RTEMS_INLINE_ROUTINE void _Scheduler_Block(Thread_Control *the_thread)
Blocks a thread with respect to the scheduler.
Definition: schedulerimpl.h:278
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_priority_higher_than(const Scheduler_Control *scheduler, Priority_Control p1, Priority_Control p2)
Returns true if p1 encodes a higher priority than p2 in the intuitive sense of priority.
Definition: schedulerimpl.h:687
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(Thread_Control *the_thread, uint32_t length)
Releases a job of a thread with respect to the scheduler.
Definition: schedulerimpl.h:437
Scheduler_Void_or_thread(* change_priority)(const Scheduler_Control *, Thread_Control *, Priority_Control, bool)
Definition: scheduler.h:87
Scheduler control.
Definition: scheduler.h:192
Priority_Control current_priority
This field is the current priority state of this thread.
Definition: thread.h:683
Scheduler_Operations Operations
The scheduler operations.
Definition: scheduler.h:201
Scheduler context.
Definition: scheduler.h:180
#define _ISR_lock_ISR_disable_and_acquire(_lock, _context)
Acquires an ISR lock.
Definition: isrlock.h:205
Scheduler_Context * context
Reference to a statically allocated scheduler context.
Definition: scheduler.h:196
#define RTEMS_INLINE_ROUTINE
The following (in conjunction with compiler arguments) are used to choose between the use of static i...
Definition: basedefs.h:135
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire(Thread_Control *the_thread, ISR_lock_Context *lock_context)
Acquires the scheduler instance of the thread.
Definition: schedulerimpl.h:1371
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing(const Thread_Control *the_thread)
This function returns true if the_thread is the currently executing thread, and false otherwise...
Definition: threadimpl.h:517
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock(Thread_Control *the_thread)
Unblocks a thread with respect to the scheduler.
Definition: schedulerimpl.h:295
#define ISR_LOCK_DECLARE(_qualifier, _designator)
Declares an ISR lock variable.
Definition: isrlock.h:104
void(* release_job)(const Scheduler_Control *, Thread_Control *, uint32_t)
Definition: scheduler.h:139
void(* node_initialize)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:120
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule(Thread_Control *the_thread)
The preferred method to add a new scheduler is to define the jump table entries and add a case to the...
Definition: schedulerimpl.h:144
RTEMS_INLINE_ROUTINE Objects_Id _Objects_Build_id(Objects_APIs the_api, uint16_t the_class, uint8_t node, uint16_t index)
This function builds an object's id from the processor node and index values specified.
Definition: object.h:397
Resource node to reflect ownership of resources and a dependency on a resource.
Definition: resource.h:150
void(* block)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:75
uint32_t Priority_Control
The following type defines the control block used to manage thread priorities.
Definition: priority.h:56
Thread_Scheduler_control Scheduler
Scheduler related control.
Definition: thread.h:756
States_Control _Thread_Set_state(Thread_Control *the_thread, States_Control state)
Sets the specified thread state.
Definition: threadsetstate.c:28
void(* tick)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:146
RTEMS_INLINE_ROUTINE bool _Thread_Is_heir(const Thread_Control *the_thread)
This function returns true if the_thread is the heir thread, and false otherwise. ...
Definition: threadimpl.h:575
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(const Scheduler_Control *scheduler, Thread_Control *the_thread, Per_CPU_Control *cpu)
Starts the idle thread for a particular processor.
Definition: schedulerimpl.h:480
struct Scheduler_Node * node
The scheduler node of this thread.
Definition: thread.h:609
This structure defines the Thread Control Block (TCB).
Definition: thread.h:671
Per CPU Core Structure.
Definition: percpu.h:233
RTEMS_INLINE_ROUTINE void _Scheduler_Release(Thread_Control *the_thread, ISR_lock_Context *lock_context)
Releases the scheduler instance of the thread.
Definition: schedulerimpl.h:1386
int(* priority_compare)(Priority_Control, Priority_Control)
Definition: scheduler.h:133
Implementation Helper for CPU Set.
Constants and Structures Associated with the Scheduler.
void(* schedule)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:66
#define _ISR_lock_Release_and_ISR_enable(_lock, _context)
Releases an ISR lock.
Definition: isrlock.h:230
void(* node_destroy)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:123
#define _Assert(_e)
Assertion similar to assert() controlled via RTEMS_DEBUG instead of NDEBUG.
Definition: assert.h:83
void _Resource_Iterate(Resource_Node *top, Resource_Node_visitor visitor, void *arg)
Iterates over all nodes of a resource dependency tree.
Definition: resourceiterate.c:28
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(const Scheduler_Control *scheduler, Thread_Control *the_thread)
Initializes a scheduler node.
Definition: schedulerimpl.h:361
void(* update_priority)(const Scheduler_Control *, Thread_Control *, Priority_Control)
Definition: scheduler.h:126
struct Thread_Control * executing
This is the thread executing on this processor.
Definition: percpu.h:278
RTEMS_INLINE_ROUTINE int _Scheduler_Priority_compare(const Scheduler_Control *scheduler, Priority_Control p1, Priority_Control p2)
Compares two priority values.
Definition: schedulerimpl.h:422
Scheduler_Void_or_thread(* yield)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:69
#define STATES_MIGRATING
This macro corresponds to a task migrating to another scheduler.
Definition: statesimpl.h:84
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority(Thread_Control *the_thread, Priority_Control new_priority)
Updates the scheduler about a priority change of a not ready thread.
Definition: schedulerimpl.h:392
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(const Scheduler_Control *scheduler, Thread_Control *the_thread)
Destroys a scheduler node.
Definition: schedulerimpl.h:378
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_priority_lower_than(const Scheduler_Control *scheduler, Priority_Control p1, Priority_Control p2)
Returns true if p1 encodes a lower priority than p2 in the intuitive sense of priority.
Definition: schedulerimpl.h:674
Scheduler node for per-thread data.
Definition: scheduler.h:281
const Scheduler_Control _Scheduler_Table[]
Registered schedulers.
Scheduler_Void_or_thread(* unblock)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:81
Inlined Routines from the Thread Handler.
RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority(Thread_Control *the_thread, Priority_Control new_priority, bool prepend_it)
Propagates a priority change of a thread to the scheduler.
Definition: schedulerimpl.h:326
void(* start_idle)(const Scheduler_Control *, Thread_Control *, struct Per_CPU_Control *)
Definition: scheduler.h:149
RTEMS_INLINE_ROUTINE void _Scheduler_Yield(Thread_Control *the_thread)
Scheduler yield with a particular thread.
Definition: schedulerimpl.h:253
States_Control _Thread_Clear_state(Thread_Control *the_thread, States_Control state)
Clears the specified thread state.
Definition: threadclearstate.c:25
Local ISR lock context for acquire and release pairs.
Definition: isrlock.h:65
uint32_t Objects_Id
The following type defines the control block used to manage object IDs.
Definition: object.h:122
void _Scheduler_Handler_initialization(void)
Initializes the scheduler to the policy chosen by the user.
Definition: scheduler.c:25
#define _Objects_Local_node
The following is referenced to the node number of the local node.
Definition: objectimpl.h:183
bool is_preemptible
This field is true if the thread is preemptible.
Definition: thread.h:749
SuperCore SMP Implementation.
RTEMS_INLINE_ROUTINE void _Scheduler_Tick(void)
Scheduler method invoked at each clock tick.
Definition: schedulerimpl.h:455
#define _Scheduler_Count
Count of registered schedulers.
Definition: scheduler.h:352