RTEMS CPU Kit with SuperCore 4.11.2
schedulersmpimpl.h
/*
 * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved.
 *
 * embedded brains GmbH
 * Dornierstr. 4
 * 82178 Puchheim
 * Germany
 * <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

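/*
 * These function pointer types parameterize the generic SMP scheduler
 * operations below.  A concrete SMP scheduler supplies implementations
 * that operate on its particular ready set and binds them at the call
 * site, for example (illustrative sketch with hypothetical _My_SMP_*
 * names, not taken from this file):
 *
 *   static Thread_Control *_My_SMP_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Thread_Control    *needs_help
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       needs_help,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _My_SMP_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_SMP_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */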
typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node *node
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node *filter,
  Chain_Node_order order
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_move
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_update,
  Priority_Control new_priority
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_enqueue,
  Thread_Control *needs_help
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_enqueue
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Thread_Control *scheduled,
  Thread_Control *victim
);

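/*
 * Chain order relation for LIFO priority insertion: a node is inserted
 * before the first node with a priority greater than or equal to its
 * own, so it precedes existing nodes of equal priority.
 */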
static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority <= node_next->priority;
}

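/*
 * Chain order relation for FIFO priority insertion: the strict
 * comparison places a node behind all existing nodes of equal
 * priority.
 */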
static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority < node_next->priority;
}

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

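/*
 * Initializes the SMP scheduler context: both the chain of scheduled
 * nodes and the chain of idle threads start out empty.
 */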
static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline void _Scheduler_SMP_Node_initialize(
  Scheduler_SMP_Node *node,
  Thread_Control *thread
)
{
  _Scheduler_Node_do_initialize( &node->Base, thread );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control new_priority
)
{
  node->priority = new_priority;
}

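/*
 * Transition table for the _Assert() in
 * _Scheduler_SMP_Node_change_state() below; it is indexed by the
 * current and the new node state.
 */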
extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control *cpu
)
{
  return cpu->scheduler_context == context;
}

static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context *context
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context *context,
  Thread_Control *idle
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
}

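/*
 * Lazy variant of processor allocation: if the scheduled thread still
 * executes on a processor owned by this scheduler context, the heir of
 * that processor is updated in place; a thread executing on a foreign
 * processor is forced to migrate.  The victim processor only gets a
 * new heir if the heir actually changes.
 */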
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Thread_Control *scheduled_thread,
  Thread_Control *victim_thread
)
{
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      _Assert(
        scheduled_thread->Scheduler.debug_real_cpu->heir != scheduled_thread
      );
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node *scheduled,
  Scheduler_Node *victim,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );

  _Scheduler_SMP_Node_change_state(
    _Scheduler_SMP_Node_downcast( scheduled ),
    SCHEDULER_SMP_NODE_SCHEDULED
  );
  _Scheduler_Thread_change_state( scheduled_thread, THREAD_SCHEDULER_SCHEDULED );

  ( *allocate_processor )( context, scheduled_thread, victim_thread );
}

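/*
 * The scheduled chain is maintained in priority order, so the lowest
 * priority scheduled node is simply the last node of the chain.  The
 * filter and order arguments exist only to satisfy the
 * Scheduler_SMP_Get_lowest_scheduled interface and are unused here.
 */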
static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *filter,
  Chain_Node_order order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;
  (void) order;

  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );

  return lowest_scheduled;
}

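/*
 * Tries to schedule the node in place of the lowest scheduled node.
 * Depending on the _Scheduler_Try_to_schedule_node() action, the
 * victim either becomes ready, exchanges its idle thread with the
 * node, or the node is blocked.
 */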
static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Scheduler_Node *lowest_scheduled,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Thread_Control *needs_help;
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    Thread_Control *lowest_scheduled_user =
      _Scheduler_Node_get_user( lowest_scheduled );
    Thread_Control *idle;

    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_Thread_change_state(
      lowest_scheduled_user,
      THREAD_SCHEDULER_READY
    );

    _Scheduler_SMP_Allocate_processor(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    idle = _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
    if ( idle == NULL ) {
      needs_help = lowest_scheduled_user;
    } else {
      needs_help = NULL;
    }
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( node ),
      SCHEDULER_SMP_NODE_SCHEDULED
    );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );

    needs_help = NULL;
  } else {
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( node ),
      SCHEDULER_SMP_NODE_BLOCKED
    );
    needs_help = NULL;
  }

  return needs_help;
}

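/**
 * @brief Enqueues a node according to the specified order function.
 *
 * The node must not be in the scheduled state.  If the node is better
 * than the lowest scheduled node with respect to the order relation,
 * it takes over a processor, otherwise it is inserted into the ready
 * set.
 */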
static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Thread_Control *needs_help,
  Chain_Node_order order,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_Node *lowest_scheduled =
    ( *get_lowest_scheduled )( context, node, order );

  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor
    );
  } else {
    ( *insert_ready )( context, node );
  }

  return needs_help;
}

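/**
 * @brief Enqueues a node which is scheduled but was already extracted
 * from the scheduled chain.
 *
 * The node is re-inserted into the scheduled set if it is still better
 * than the highest ready node, otherwise it becomes ready and the
 * highest ready node takes over the processor.
 */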
static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Chain_Node_order order,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Thread_Control *needs_help;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
      ( *insert_scheduled )( context, node );

      needs_help = NULL;
    } else {
      Scheduler_Try_to_schedule_action action;

      action = _Scheduler_Try_to_schedule_node(
        context,
        highest_ready,
        _Scheduler_Node_get_idle( node ),
        _Scheduler_SMP_Get_idle_thread
      );

      if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
        Thread_Control *user = _Scheduler_Node_get_user( node );
        Thread_Control *idle;

        _Scheduler_SMP_Node_change_state(
          _Scheduler_SMP_Node_downcast( node ),
          SCHEDULER_SMP_NODE_READY
        );
        _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );

        _Scheduler_SMP_Allocate_processor(
          context,
          highest_ready,
          node,
          allocate_processor
        );

        ( *insert_ready )( context, node );
        ( *move_from_ready_to_scheduled )( context, highest_ready );

        idle = _Scheduler_Release_idle_thread(
          context,
          node,
          _Scheduler_SMP_Release_idle_thread
        );
        if ( idle == NULL ) {
          needs_help = user;
        } else {
          needs_help = NULL;
        }
      } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
        _Scheduler_SMP_Node_change_state(
          _Scheduler_SMP_Node_downcast( node ),
          SCHEDULER_SMP_NODE_READY
        );
        _Scheduler_SMP_Node_change_state(
          _Scheduler_SMP_Node_downcast( highest_ready ),
          SCHEDULER_SMP_NODE_SCHEDULED
        );

        ( *insert_ready )( context, node );
        ( *move_from_ready_to_scheduled )( context, highest_ready );

        _Scheduler_Exchange_idle_thread(
          highest_ready,
          node,
          _Scheduler_Node_get_idle( node )
        );

        needs_help = NULL;
      } else {
        _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

        _Scheduler_SMP_Node_change_state(
          _Scheduler_SMP_Node_downcast( highest_ready ),
          SCHEDULER_SMP_NODE_BLOCKED
        );

        ( *extract_from_ready )( context, highest_ready );

        continue;
      }
    }
  } while ( false );

  return needs_help;
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Node *node
)
{
  _Chain_Extract_unprotected( &node->Node );
}

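/*
 * Allocates the processor of the victim node to the highest ready
 * node.  Ready nodes that cannot be scheduled are blocked and
 * extracted from the ready set until a schedulable node is found.
 */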
static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node *victim,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
    Scheduler_Try_to_schedule_action action;

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        victim,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        _Scheduler_SMP_Node_downcast( highest_ready ),
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );

      continue;
    }
  } while ( false );
}

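/**
 * @brief Blocks a thread.
 *
 * If the node of the thread was scheduled, its processor is handed
 * over to the highest ready node, otherwise the node is simply
 * extracted from the ready set.
 */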
static inline void _Scheduler_SMP_Block(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
  bool block;

  _Assert( is_scheduled || node->state == SCHEDULER_SMP_NODE_READY );

  block = _Scheduler_Block_node(
    context,
    thread,
    &node->Base,
    is_scheduled,
    _Scheduler_SMP_Get_idle_thread
  );
  if ( block ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( is_scheduled ) {
      _Scheduler_SMP_Extract_from_scheduled( &node->Base );

      _Scheduler_SMP_Schedule_highest_ready(
        context,
        &node->Base,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor
      );
    } else {
      ( *extract_from_ready )( context, &node->Base );
    }
  }
}

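/*
 * Unblocks a thread.  If the node was blocked, it becomes ready and is
 * enqueued; otherwise the node is already in use through scheduler
 * helping, and the unblocked thread itself may need help.
 */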
static inline Thread_Control *_Scheduler_SMP_Unblock(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Enqueue enqueue_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
  bool unblock = _Scheduler_Unblock_node(
    context,
    thread,
    &node->Base,
    is_scheduled,
    _Scheduler_SMP_Release_idle_thread
  );
  Thread_Control *needs_help;

  if ( unblock ) {
    if ( node->state == SCHEDULER_SMP_NODE_BLOCKED ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

      needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
    } else {
      _Assert( node->state == SCHEDULER_SMP_NODE_READY );
      _Assert(
        node->Base.help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->Base.help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( node->Base.idle == NULL );

      if ( node->Base.accepts_help == thread ) {
        needs_help = thread;
      } else {
        needs_help = NULL;
      }
    }
  } else {
    needs_help = NULL;
  }

  return needs_help;
}

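/*
 * Changes the priority of a thread.  Scheduled and ready nodes are
 * extracted, updated and re-enqueued, so the priority change may lead
 * to a different schedule; blocked nodes are just updated.
 */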
static inline Thread_Control *_Scheduler_SMP_Change_priority(
  Scheduler_Context *context,
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Update update,
  Scheduler_SMP_Enqueue enqueue_fifo,
  Scheduler_SMP_Enqueue enqueue_lifo,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_lifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( thread );
  Thread_Control *needs_help;

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( &node->Base );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      needs_help = ( *enqueue_scheduled_lifo )( context, &node->Base );
    } else {
      needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
    }
  } else if ( node->state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, &node->Base );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      needs_help = ( *enqueue_lifo )( context, &node->Base, NULL );
    } else {
      needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
    }
  } else {
    ( *update )( context, &node->Base, new_priority );

    needs_help = NULL;
  }

  return needs_help;
}

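/*
 * Handles a help request: the action depends on the state of the node
 * of the thread offering help (ready, scheduled or blocked).
 */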
static inline Thread_Control *_Scheduler_SMP_Ask_for_help(
  Scheduler_Context *context,
  Thread_Control *offers_help,
  Thread_Control *needs_help,
  Scheduler_SMP_Enqueue enqueue_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
  Thread_Control *next_needs_help = NULL;
  Thread_Control *previous_accepts_help;

  previous_accepts_help = node->Base.accepts_help;
  node->Base.accepts_help = needs_help;

  switch ( node->state ) {
    case SCHEDULER_SMP_NODE_READY:
      next_needs_help =
        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
      break;
    case SCHEDULER_SMP_NODE_SCHEDULED:
      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
        context,
        &node->Base,
        offers_help,
        needs_help,
        previous_accepts_help,
        _Scheduler_SMP_Release_idle_thread
      );
      break;
    case SCHEDULER_SMP_NODE_BLOCKED:
      if (
        _Scheduler_Ask_blocked_node_for_help(
          context,
          &node->Base,
          offers_help,
          needs_help
        )
      ) {
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

        next_needs_help = ( *enqueue_fifo )(
          context,
          &node->Base,
          needs_help
        );
      }
      break;
  }

  return next_needs_help;
}

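/*
 * Yields the processor: the node of the yielding thread is extracted
 * from its current set and re-enqueued in FIFO order, behind nodes of
 * equal priority.
 */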
static inline Thread_Control *_Scheduler_SMP_Yield(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Enqueue enqueue_fifo,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
  Thread_Control *needs_help;

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( &node->Base );

    needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
  } else {
    ( *extract_from_ready )( context, &node->Base );

    needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
  }

  return needs_help;
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_fifo_order
  );
}

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */