RTEMS 5.0.0
schedulerimpl.h
/*
 * Copyright (C) 2010 Gedare Bloom.
 * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 * Copyright (c) 2014, 2017 embedded brains GmbH
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

#define SCHEDULER_PRIORITY_MAP( priority ) ( ( priority ) << 1 )

#define SCHEDULER_PRIORITY_UNMAP( priority ) ( ( priority ) >> 1 )

#define SCHEDULER_PRIORITY_PURIFY( priority ) \
  ( ( priority ) & ~( (Priority_Control) SCHEDULER_PRIORITY_APPEND_FLAG ) )

#define SCHEDULER_PRIORITY_APPEND( priority ) \
  ( ( priority ) | SCHEDULER_PRIORITY_APPEND_FLAG )

#define SCHEDULER_PRIORITY_IS_APPEND( priority ) \
  ( ( ( priority ) & SCHEDULER_PRIORITY_APPEND_FLAG ) != 0 )

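/*
 * Illustrative sketch (assumption: SCHEDULER_PRIORITY_APPEND_FLAG occupies
 * the least significant bit, which is why the map shifts by one): mapping
 * a user priority into the scheduler domain makes room for the append
 * flag, which requests append rather than prepend ordering among threads
 * of equal priority.
 *
 *   Priority_Control p;
 *
 *   p = SCHEDULER_PRIORITY_MAP( 10 );    // user priority 10 in the scheduler domain
 *   p = SCHEDULER_PRIORITY_APPEND( p );  // same priority, append flag set
 *   _Assert( SCHEDULER_PRIORITY_IS_APPEND( p ) );
 *   _Assert( SCHEDULER_PRIORITY_UNMAP( SCHEDULER_PRIORITY_PURIFY( p ) ) == 10 );
 */
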
RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}
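
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * scheduler operations in this file bracket each operation call with the
 * per-instance lock while interrupts are already disabled.
 *
 *   ISR_lock_Context lock_context;
 *
 *   _Scheduler_Acquire_critical( scheduler, &lock_context );
 *   ( *scheduler->Operations.schedule )( scheduler, the_thread );
 *   _Scheduler_Release_critical( scheduler, &lock_context );
 */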

#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );

RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  if ( the_thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_Request_ask_for_help( the_thread );
  }
}
#endif

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */

RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

#if defined(RTEMS_SMP)
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
    _Chain_First( &the_thread->Scheduler.Scheduler_nodes )
  );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
#else
  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  scheduler = _Thread_Scheduler_get_home( the_thread );
#endif

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )( scheduler, the_thread, scheduler_node );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
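
/*
 * Illustrative call sequence (hypothetical caller, not part of this
 * header): after a thread's priority aggregation changed, the new value
 * is propagated to all schedulers of the thread in one call.
 *
 *   _Scheduler_Node_set_priority( node, SCHEDULER_PRIORITY_MAP( 5 ), false );
 *   _Scheduler_Update_priority( the_thread );
 */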

#if defined(RTEMS_SMP)

RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
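
/*
 * Lifecycle sketch (hypothetical caller, not part of this header): a
 * scheduler node is created with the thread's mapped priority and must be
 * destroyed through the same scheduler instance.
 *
 *   _Scheduler_Node_initialize(
 *     scheduler,
 *     node,
 *     the_thread,
 *     _Scheduler_Map_priority( scheduler, priority )
 *   );
 *   ...
 *   _Scheduler_Node_destroy( scheduler, node );
 */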

RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}
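
/*
 * Usage sketch (hypothetical caller, not part of this header; the
 * follow-up priority update call is an assumption based on threadimpl.h):
 * for deadline-based schedulers such as EDF, releasing a job installs a
 * deadline-derived priority and cancelling it removes that contribution;
 * the collected priority updates are applied afterwards.
 *
 *   Thread_queue_Context queue_context;
 *
 *   _Scheduler_Release_job( the_thread, priority_node, deadline, &queue_context );
 *   _Thread_Priority_update( &queue_context );
 */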

RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control          *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;
  return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
    const Scheduler_Control *,
    Thread_Control *,
    Scheduler_Node *
  ),
  void                  ( *schedule )(
    const Scheduler_Control *,
    Thread_Control *,
    bool
  )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
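
/*
 * Implementation sketch (hypothetical scheduler back end, not part of
 * this header; _My_scheduler_Extract and _My_scheduler_Schedule_body are
 * made-up helpers): a simple scheduler can implement its block operation
 * by delegating to _Scheduler_Generic_block, so that blocking the heir or
 * the executing thread forces a new scheduling decision.
 *
 *   static void _My_scheduler_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *the_thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     _Scheduler_Generic_block(
 *       scheduler,
 *       the_thread,
 *       node,
 *       _My_scheduler_Extract,
 *       _My_scheduler_Schedule_body
 *     );
 *   }
 */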

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Processor_mask_Count( &context->Processors );
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) ( scheduler - &_Scheduler_Table[ 0 ] );
}
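
/*
 * Round-trip sketch (not part of this header): scheduler identifiers are
 * fake object ids built from the scheduler table index, so index and id
 * convert losslessly in both directions.
 *
 *   Objects_Id               id;
 *   const Scheduler_Control *scheduler;
 *
 *   id = _Scheduler_Build_id( 0 );
 *   scheduler = _Scheduler_Get_by_id( id );
 *   _Assert( scheduler == &_Scheduler_Table[ 0 ] );
 *   _Assert( _Scheduler_Get_index( scheduler ) == 0 );
 */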

#if defined(RTEMS_SMP)

typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}
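
/*
 * Dispatch sketch (hypothetical SMP scheduler back end, not part of this
 * header): callers switch on the returned action.
 *
 *   Scheduler_Try_to_schedule_action action;
 *
 *   action = _Scheduler_Try_to_schedule_node(
 *     context,
 *     node,
 *     idle,
 *     get_idle_thread
 *   );
 *
 *   switch ( action ) {
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE:
 *       // make the node's user the heir of its processor
 *       break;
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE:
 *       // move the idle thread over from the previously scheduled node
 *       break;
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK:
 *       // the owner is already scheduled elsewhere; block this node
 *       break;
 *   }
 */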

RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int              sticky_level;
  ISR_lock_Context lock_context;
  Per_CPU_Control *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}
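
/*
 * Behavioural sketch (hypothetical SMP scheduler back end, not part of
 * this header): a sticky level that stays positive after the decrement
 * keeps the node scheduled and parks an idle thread on the processor,
 * e.g. for resource sharing protocols that use sticky scheduler nodes;
 * only at sticky level zero does the processor become available again.
 *
 *   Per_CPU_Control *thread_cpu;
 *
 *   thread_cpu = _Scheduler_Block_node(
 *     context,
 *     thread,
 *     node,
 *     is_scheduled,
 *     get_idle_thread
 *   );
 *
 *   if ( thread_cpu != NULL ) {
 *     // the node is truly blocked; extract it from the ready structures
 *   }
 */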

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}
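
/*
 * Illustrative effect (not part of this header): if the current heir is
 * preemptible or the dispatch is forced, the new heir is installed and a
 * dispatch is flagged; the actual context switch happens later in the
 * thread dispatch.
 *
 *   _Scheduler_Update_heir( new_heir, false ); // respects heir->is_preemptible
 *   _Scheduler_Update_heir( new_heir, true );  // forces the heir update
 */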

RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;
#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if (
    !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
#if defined(RTEMS_SMP)
      || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
      || the_thread->Scheduler.pin_level != 0
#endif
  ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  old_scheduler = _Thread_Scheduler_get_home( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if (
    _Scheduler_Get_processor_count( new_scheduler ) == 0
      || !( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
      )
  ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
  the_thread->Scheduler.home_scheduler = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
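
/*
 * Usage sketch (hypothetical caller, not part of this header): moving a
 * thread to another scheduler instance fails while the thread waits on a
 * thread queue, holds additional priority contributions, or is pinned.
 *
 *   Status_Control status;
 *
 *   status = _Scheduler_Set(
 *     new_scheduler,
 *     the_thread,
 *     SCHEDULER_PRIORITY_MAP( 10 )
 *   );
 *
 *   if ( status == STATUS_RESOURCE_IN_USE ) {
 *     // the thread currently uses resources which tie it to its home scheduler
 *   }
 */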

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */