/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/bspIo.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

typedef bool ( *Scheduler_SMP_Has_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *node
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   insert_priority
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Scheduler_Node    *node
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef void ( *Scheduler_SMP_Set_affinity )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  void              *arg
);

typedef bool ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
);

typedef void ( *Scheduler_SMP_Register_idle )(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
);

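/*
 * The operation types above parameterize the generic SMP scheduler
 * implementation that follows: a concrete SMP scheduler provides one
 * function per operation and passes them to the _Scheduler_SMP_*() template
 * routines below, which contain the scheduler-independent state machine
 * logic.  A sketch of such a binding follows _Scheduler_SMP_Enqueue().
 */
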
static inline void _Scheduler_SMP_Do_nothing_register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
)
{
  (void) context;
  (void) idle;
  (void) cpu;
}

static inline bool _Scheduler_SMP_Priority_less_equal(
  const void       *to_insert,
  const Chain_Node *next
)
{
  const Priority_Control   *priority_to_insert;
  const Scheduler_SMP_Node *node_next;

  priority_to_insert = (const Priority_Control *) to_insert;
  node_next = (const Scheduler_SMP_Node *) next;

  return *priority_to_insert <= node_next->priority;
}

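/*
 * Note on the order relation above: the insert priority may carry the append
 * indicator (see SCHEDULER_PRIORITY_APPEND() and SCHEDULER_PRIORITY_PURIFY()
 * in schedulerimpl.h).  Since stored node priorities have the indicator
 * cleared, an insert priority with the indicator set compares greater than
 * equal-priority nodes already in the chain and is therefore placed behind
 * them (append), while one without it is placed in front of them (prepend).
 */
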
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}

static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node           *node,
  Scheduler_SMP_Node_state  new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  the_node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->Scheduler.context == context;
}

static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context *context
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context *context,
  Thread_Control    *idle
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
}

static inline void _Scheduler_SMP_Exctract_idle_thread(
  Thread_Control *idle
)
{
  _Chain_Extract_unprotected( &idle->Object.Node );
}

static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}

/*
 * This method differs from _Scheduler_SMP_Allocate_processor_lazy() in that
 * it does exactly what it is asked to do: it always assigns the scheduled
 * thread to the victim processor.  _Scheduler_SMP_Allocate_processor_lazy()
 * attempts to prevent migrations, but does not take affinity into account.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;
  (void) victim;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *allocate_processor )( context, scheduled, victim, victim_cpu );
}

static inline Thread_Control *_Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_thread;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *victim_cpu;

  victim_thread = _Scheduler_Node_get_user( victim );
  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );

  victim_cpu = _Thread_Get_CPU( victim_thread );

  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );

    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
      _Per_CPU_Acquire( victim_cpu );
      _Chain_Append_unprotected(
        &victim_cpu->Threads_in_need_for_help,
        &victim_thread->Scheduler.Help_node
      );
      _Per_CPU_Release( victim_cpu );
    }
  }

  _Thread_Scheduler_release_critical( victim_thread, &lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    victim,
    victim_cpu,
    allocate_processor
  );

  return victim_thread;
}

static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;

  _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
  _Assert(
    _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
  );

  return lowest_scheduled;
}

static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Priority_Control                  priority,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    _Scheduler_SMP_Node_change_state(
      lowest_scheduled,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );
  } else {
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
  }
}

static inline bool _Scheduler_SMP_Enqueue(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Priority_Control                    insert_priority,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  bool            needs_help;
  Scheduler_Node *lowest_scheduled;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      insert_priority,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor
    );
    needs_help = false;
  } else {
    ( *insert_ready )( context, node, insert_priority );
    needs_help = true;
  }

  return needs_help;
}

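/*
 * Illustrative sketch: a concrete SMP scheduler typically wraps
 * _Scheduler_SMP_Enqueue() and binds its own ready-queue operations.  The
 * _My_SMP_* names below are hypothetical placeholders for such
 * scheduler-specific operations, not RTEMS APIs; see the simple SMP
 * scheduler in the RTEMS sources for a real binding.
 *
 *   static bool _My_SMP_Enqueue(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Priority_Control   insert_priority
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue(
 *       context,
 *       node,
 *       insert_priority,
 *       _Scheduler_SMP_Priority_less_equal,
 *       _My_SMP_Insert_ready,
 *       _Scheduler_SMP_Insert_scheduled,
 *       _My_SMP_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */
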
static inline bool _Scheduler_SMP_Enqueue_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *const node,
  Priority_Control                  insert_priority,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  while ( true ) {
    Scheduler_Node                   *highest_ready;
    Scheduler_Try_to_schedule_action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We now have to
     * place it on the scheduled or ready set.
     */
    if (
      node->sticky_level > 0
        && ( *order )( &insert_priority, &highest_ready->Node.Chain )
    ) {
      ( *insert_scheduled )( context, node, insert_priority );

      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
        Thread_Control   *owner;
        ISR_lock_Context  lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          _Thread_Scheduler_cancel_need_for_help(
            owner,
            _Thread_Get_CPU( owner )
          );
          _Scheduler_Discard_idle_thread(
            context,
            owner,
            node,
            _Scheduler_SMP_Release_idle_thread
          );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
        }

        _Thread_Scheduler_release_critical( owner, &lock_context );
      }

      return false;
    }

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      _Scheduler_Node_get_idle( node ),
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      Thread_Control *idle;

      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      return ( idle == NULL );
    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_SCHEDULED
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      _Scheduler_Exchange_idle_thread(
        highest_ready,
        node,
        _Scheduler_Node_get_idle( node )
      );
      return false;
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  (void) context;
  _Chain_Extract_unprotected( &node->Node.Chain );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        victim,
        victim_cpu,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

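/*
 * Like _Scheduler_SMP_Schedule_highest_ready() above, but uses
 * _Scheduler_SMP_Preempt() instead of _Scheduler_SMP_Allocate_processor(),
 * so the victim thread additionally undergoes the full preemption protocol
 * (thread scheduler state update and help-list registration).
 */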
static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        victim,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node_state  node_state;
  Per_CPU_Control          *thread_cpu;

  node_state = _Scheduler_SMP_Node_state( node );

  thread_cpu = _Scheduler_Block_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Get_idle_thread
  );

  if ( thread_cpu != NULL ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      ( *extract_from_scheduled )( context, node );
      _Scheduler_SMP_Schedule_highest_ready(
        context,
        node,
        thread_cpu,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor
      );
    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
      ( *extract_from_ready )( context, node );
    }
  }
}

static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Update   update,
  Scheduler_SMP_Enqueue  enqueue
)
{
  Scheduler_SMP_Node_state  node_state;
  bool                      unblock;

  node_state = _Scheduler_SMP_Node_state( node );
  unblock = _Scheduler_Unblock_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Release_idle_thread
  );

  if ( unblock ) {
    Priority_Control priority;
    bool             needs_help;

    priority = _Scheduler_Node_get_priority( node );
    priority = SCHEDULER_PRIORITY_PURIFY( priority );

    if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
      ( *update )( context, node, priority );
    }

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
      needs_help = ( *enqueue )( context, node, insert_priority );
    } else {
      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
      _Assert( node->sticky_level > 0 );
      _Assert( node->idle == NULL );
      needs_help = true;
    }

    if ( needs_help ) {
      _Scheduler_Ask_for_help( thread );
    }
  }
}

static inline void _Scheduler_SMP_Update_priority(
  Scheduler_Context          *context,
  Thread_Control             *thread,
  Scheduler_Node             *node,
  Scheduler_SMP_Extract       extract_from_ready,
  Scheduler_SMP_Update        update,
  Scheduler_SMP_Enqueue       enqueue,
  Scheduler_SMP_Enqueue       enqueue_scheduled,
  Scheduler_SMP_Ask_for_help  ask_for_help
)
{
  Priority_Control         priority;
  Priority_Control         insert_priority;
  Scheduler_SMP_Node_state node_state;

  insert_priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );

  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }

    return;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    ( *update )( context, node, priority );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue )( context, node, insert_priority );
  } else {
    ( *update )( context, node, priority );

    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }
  }
}

static inline void _Scheduler_SMP_Yield(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue,
  Scheduler_SMP_Enqueue  enqueue_scheduled
)
{
  bool                     needs_help;
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    ( *enqueue_scheduled )( context, node, insert_priority );
    needs_help = false;
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );

    needs_help = ( *enqueue )( context, node, insert_priority );
  } else {
    needs_help = true;
  }

  if ( needs_help ) {
    _Scheduler_Ask_for_help( thread );
  }
}

static inline void _Scheduler_SMP_Insert_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   priority_to_insert
)
{
  Scheduler_SMP_Context *self;

  self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node.Chain,
    &priority_to_insert,
    _Scheduler_SMP_Priority_less_equal
  );
}

static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_Node   *lowest_scheduled;
  ISR_lock_Context  lock_context;
  bool              success;

  if ( thread->Scheduler.pinned_scheduler != NULL ) {
    /*
     * Pinned threads are not allowed to ask for help.  Return success to
     * break the loop in _Thread_Ask_for_help() early.
     */
    return true;
  }

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( node );

      if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
        _Thread_Scheduler_cancel_need_for_help(
          thread,
          _Thread_Get_CPU( thread )
        );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          allocate_processor
        );

        ( *insert_scheduled )( context, node, insert_priority );
        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

        _Scheduler_Release_idle_thread(
          context,
          lowest_scheduled,
          _Scheduler_SMP_Release_idle_thread
        );
        success = true;
      } else {
        _Thread_Scheduler_release_critical( thread, &lock_context );
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node, insert_priority );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Thread_Scheduler_cancel_need_for_help(
        thread,
        _Thread_Get_CPU( thread )
      );
      _Scheduler_Discard_idle_thread(
        context,
        thread,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}

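/*
 * The return value above reports the outcome of the help request to the
 * caller (see _Thread_Ask_for_help(), referenced in the pinned-thread
 * comment): true means the request is settled and the iteration over the
 * thread's scheduler nodes may stop; false means this scheduler could only
 * place the node on its ready set, or could not use it at all.
 */
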
static inline void _Scheduler_SMP_Reconsider_help_request(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready
)
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if (
    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
      && node->sticky_level == 1
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }

  _Thread_Scheduler_release_critical( thread, &lock_context );
}

static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Thread_Scheduler_state            next_state,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *thread_cpu;

    thread_cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Scheduler_SMP_Extract_from_scheduled( context, node );
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      thread_cpu,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    ( *extract_from_ready )( context, node );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}

static inline void _Scheduler_SMP_Do_start_idle(
  Scheduler_Context           *context,
  Thread_Control              *idle,
  Per_CPU_Control             *cpu,
  Scheduler_SMP_Register_idle  register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_SMP_Node    *node;

  self = _Scheduler_SMP_Get_self( context );
  node = _Scheduler_SMP_Thread_get_node( idle );

  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( idle, cpu );
  ( *register_idle )( context, &node->Base, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
}

static inline void _Scheduler_SMP_Add_processor(
  Scheduler_Context           *context,
  Thread_Control              *idle,
  Scheduler_SMP_Has_ready      has_ready,
  Scheduler_SMP_Enqueue        enqueue_scheduled,
  Scheduler_SMP_Register_idle  register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *node;

  self = _Scheduler_SMP_Get_self( context );
  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
  node = _Thread_Scheduler_get_home_node( idle );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );

  if ( ( *has_ready )( &self->Base ) ) {
    Priority_Control insert_priority;

    insert_priority = _Scheduler_SMP_Node_priority( node );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    ( *enqueue_scheduled )( &self->Base, node, insert_priority );
  } else {
    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
  }
}

static inline Thread_Control *_Scheduler_SMP_Remove_processor(
  Scheduler_Context     *context,
  Per_CPU_Control       *cpu,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue
)
{
  Scheduler_SMP_Context *self;
  Chain_Node            *chain_node;
  Scheduler_Node        *victim_node;
  Thread_Control        *victim_user;
  Thread_Control        *victim_owner;
  Thread_Control        *idle;

  self = _Scheduler_SMP_Get_self( context );
  chain_node = _Chain_First( &self->Scheduled );

  do {
    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
    victim_node = (Scheduler_Node *) chain_node;
    victim_user = _Scheduler_Node_get_user( victim_node );
    chain_node = _Chain_Next( chain_node );
  } while ( _Thread_Get_CPU( victim_user ) != cpu );

  _Scheduler_SMP_Extract_from_scheduled( context, victim_node );
  victim_owner = _Scheduler_Node_get_owner( victim_node );

  if ( !victim_owner->is_idle ) {
    Scheduler_Node *idle_node;

    _Scheduler_Release_idle_thread(
      &self->Base,
      victim_node,
      _Scheduler_SMP_Release_idle_thread
    );
    idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
    idle_node = _Thread_Scheduler_get_home_node( idle );
    ( *extract_from_ready )( &self->Base, idle_node );
    _Scheduler_SMP_Preempt(
      &self->Base,
      idle_node,
      victim_node,
      _Scheduler_SMP_Allocate_processor_exact
    );

    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( victim_node );
      insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
      ( *enqueue )( context, victim_node, insert_priority );
    }
  } else {
    _Assert( victim_owner == victim_user );
    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
    idle = victim_owner;
    _Scheduler_SMP_Exctract_idle_thread( idle );
  }

  return idle;
}

static inline void _Scheduler_SMP_Set_affinity(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  void                             *arg,
  Scheduler_SMP_Set_affinity        set_affinity,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Enqueue             enqueue,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
      context,
      node,
      _Thread_Get_CPU( thread ),
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    ( *set_affinity )( context, node, arg );
  }
}

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */